-rw-r--r--Documentation/admin-guide/devices.txt2
-rw-r--r--Documentation/bpf/libbpf/index.rst3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml42
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml50
-rw-r--r--Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml63
-rw-r--r--Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml19
-rw-r--r--Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/cdns,macb.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-phy.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/mediatek,net.yaml297
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-net.txt98
-rw-r--r--Documentation/devicetree/bindings/net/micrel.txt9
-rw-r--r--Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/mscc,miim.yaml61
-rw-r--r--Documentation/devicetree/bindings/net/mscc-miim.txt26
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml361
-rw-r--r--Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml (renamed from Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml)2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst639
-rw-r--r--Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg151
-rw-r--r--Documentation/networking/device_drivers/can/index.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst35
-rw-r--r--Documentation/networking/device_drivers/index.rst1
-rw-r--r--Documentation/networking/device_drivers/wan/index.rst18
-rw-r--r--Documentation/networking/device_drivers/wan/z8530book.rst256
-rw-r--r--Documentation/networking/devlink/devlink-linecard.rst126
-rw-r--r--Documentation/networking/devlink/index.rst1
-rw-r--r--Documentation/networking/devlink/mlxsw.rst33
-rw-r--r--Documentation/networking/dsa/dsa.rst17
-rw-r--r--Documentation/networking/ethtool-netlink.rst8
-rw-r--r--Documentation/networking/ip-sysctl.rst27
-rw-r--r--Documentation/networking/mptcp-sysctl.rst18
-rw-r--r--MAINTAINERS41
-rw-r--r--arch/alpha/include/uapi/asm/socket.h2
-rw-r--r--arch/arm/boot/dts/aspeed-g6.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi32
-rw-r--r--arch/arm64/include/asm/insn.h9
-rw-r--r--arch/arm64/lib/insn.c67
-rw-r--r--arch/arm64/net/bpf_jit.h17
-rw-r--r--arch/arm64/net/bpf_jit_comp.c255
-rw-r--r--arch/mips/configs/gpr_defconfig5
-rw-r--r--arch/mips/configs/mtx1_defconfig5
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/riscv/net/bpf_jit.h67
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c110
-rw-r--r--arch/sparc/include/uapi/asm/socket.h1
-rw-r--r--drivers/atm/Kconfig79
-rw-r--r--drivers/atm/Makefile4
-rw-r--r--drivers/atm/ambassador.c2400
-rw-r--r--drivers/atm/ambassador.h648
-rw-r--r--drivers/atm/firestream.c2057
-rw-r--r--drivers/atm/firestream.h502
-rw-r--r--drivers/atm/horizon.c2853
-rw-r--r--drivers/atm/horizon.h492
-rw-r--r--drivers/atm/nicstarmac.c5
-rw-r--r--drivers/atm/uPD98401.h293
-rw-r--r--drivers/atm/uPD98402.c266
-rw-r--r--drivers/atm/uPD98402.h107
-rw-r--r--drivers/atm/zatm.c1652
-rw-r--r--drivers/atm/zatm.h104
-rw-r--r--drivers/infiniband/core/device.c2
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c383
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c223
-rw-r--r--drivers/infiniband/hw/mlx5/main.c31
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/media/rc/bpf-lirc.c8
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/Kconfig1
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/ctucanfd/Kconfig34
-rw-r--r--drivers/net/can/ctucanfd/Makefile10
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd.h82
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c1462
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_kframe.h77
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_kregs.h325
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_pci.c294
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c131
-rw-r--r--drivers/net/can/dev/bittiming.c2
-rw-r--r--drivers/net/can/dev/rx-offload.c6
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c16
-rw-r--r--drivers/net/can/m_can/m_can.c11
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/sja1000/Kconfig2
-rw-r--r--drivers/net/can/sja1000/tscan1.c7
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c25
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h12
-rw-r--r--drivers/net/can/ti_hecc.c4
-rw-r--r--drivers/net/can/xilinx_can.c4
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c35
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h3
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c131
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c136
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h10
-rw-r--r--drivers/net/dsa/mt7530.c332
-rw-r--r--drivers/net/dsa/mt7530.h26
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c56
-rw-r--r--drivers/net/dsa/ocelot/felix.c1
-rw-r--r--drivers/net/dsa/ocelot/felix.h1
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c2
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c2
-rw-r--r--drivers/net/dsa/qca8k.c145
-rw-r--r--drivers/net/dsa/qca8k.h12
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c5
-rw-r--r--drivers/net/eql.c3
-rw-r--r--drivers/net/ethernet/alacritech/slic.h2
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c87
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c136
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c409
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c25
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c10
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c304
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c191
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h16
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c24
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c22
-rw-r--r--drivers/net/ethernet/cortina/gemini.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_devlink.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c141
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c8
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c310
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h54
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c489
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c53
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c53
-rw-r--r--drivers/net/ethernet/marvell/Kconfig2
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c20
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Kconfig20
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Makefile9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c737
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_config.h204
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c245
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h170
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c194
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h299
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c463
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c1176
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h357
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h367
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c508
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h199
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c335
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h284
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c42
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c28
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c81
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h13
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c11
-rw-r--r--drivers/net/ethernet/marvell/skge.c3
-rw-r--r--drivers/net/ethernet/marvell/sky2.c3
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig4
-rw-r--r--drivers/net/ethernet/mediatek/Makefile5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c135
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h15
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c369
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h89
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c189
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c880
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h135
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c175
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_ops.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h251
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c385
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h156
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c194
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c365
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c205
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c)51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c247
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c390
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c1582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c622
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h80
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c681
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c311
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c1373
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c250
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h550
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c282
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c842
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c84
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h121
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c276
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h146
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c12
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c26
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c42
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h26
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c238
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c10
-rw-r--r--drivers/net/ethernet/realtek/atp.h4
-rw-r--r--drivers/net/ethernet/sfc/Kconfig8
-rw-r--r--drivers/net/ethernet/sfc/Makefile4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100.c27
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c6
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.c56
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.h14
-rw-r--r--drivers/net/ethernet/sfc/efx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c52
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h4
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c4
-rw-r--r--drivers/net/ethernet/sfc/farch.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h5
-rw-r--r--drivers/net/ethernet/sfc/nic.h4
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h17204
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c24
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c33
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c180
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c42
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c66
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c43
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c235
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h10
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c12
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c18
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c5
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h1
-rw-r--r--drivers/net/ethernet/via/via-velocity.c3
-rw-r--r--drivers/net/ethernet/via/via-velocity.h1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c55
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c2
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/hamradio/Kconfig34
-rw-r--r--drivers/net/hamradio/Makefile1
-rw-r--r--drivers/net/hamradio/dmascc.c1450
-rw-r--r--drivers/net/hyperv/hyperv_net.h69
-rw-r--r--drivers/net/hyperv/netvsc.c16
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c101
-rw-r--r--drivers/net/hyperv/netvsc_drv.c150
-rw-r--r--drivers/net/ieee802154/Kconfig7
-rw-r--r--drivers/net/ieee802154/at86rf230.c163
-rw-r--r--drivers/net/ieee802154/atusb.c37
-rw-r--r--drivers/net/ieee802154/ca8210.c181
-rw-r--r--drivers/net/ieee802154/mcr20a.c5
-rw-r--r--drivers/net/ipa/ipa_endpoint.c9
-rw-r--r--drivers/net/mdio/mdio-aspeed.c138
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c81
-rw-r--r--drivers/net/netdevsim/fib.c9
-rw-r--r--drivers/net/pcs/pcs-xpcs.c6
-rw-r--r--drivers/net/phy/Kconfig7
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/adin1100.c292
-rw-r--r--drivers/net/phy/bcm87xx.c36
-rw-r--r--drivers/net/phy/marvell.c16
-rw-r--r--drivers/net/phy/micrel.c253
-rw-r--r--drivers/net/phy/microchip_t1.c50
-rw-r--r--drivers/net/phy/phy-c45.c257
-rw-r--r--drivers/net/phy/phy-core.c3
-rw-r--r--drivers/net/phy/phy.c18
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c64
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/usb/cdc_ether.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c3
-rw-r--r--drivers/net/usb/rndis_host.c47
-rw-r--r--drivers/net/usb/sr9800.h2
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/wan/Kconfig72
-rw-r--r--drivers/net/wan/Makefile5
-rw-r--r--drivers/net/wan/cosa.c2052
-rw-r--r--drivers/net/wan/cosa.h104
-rw-r--r--drivers/net/wan/hostess_sv11.c336
-rw-r--r--drivers/net/wan/lmc/Makefile18
-rw-r--r--drivers/net/wan/lmc/lmc.h33
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c65
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h52
-rw-r--r--drivers/net/wan/lmc/lmc_ioctl.h255
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2009
-rw-r--r--drivers/net/wan/lmc/lmc_media.c1206
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c106
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h18
-rw-r--r--drivers/net/wan/lmc/lmc_var.h468
-rw-r--r--drivers/net/wan/sealevel.c352
-rw-r--r--drivers/net/wan/z85230.c1641
-rw-r--r--drivers/net/wan/z85230.h407
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c101
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c27
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c153
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c572
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h168
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h15
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c186
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h43
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c629
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c285
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h17
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c984
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h28
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c748
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.h46
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c373
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c262
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h26
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c44
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c810
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h402
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c763
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.h45
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c8
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c160
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c98
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c29
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c39
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c64
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c119
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c6
-rw-r--r--drivers/net/wireless/intersil/orinoco/airport.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h6
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c140
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c4
-rw-r--r--drivers/net/wireless/purelifi/Kconfig17
-rw-r--r--drivers/net/wireless/purelifi/Makefile2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Kconfig14
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Makefile3
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.c98
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.h70
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/firmware.c276
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/intf.h52
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c754
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h184
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c891
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.h198
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c8
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c146
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c29
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c73
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c57
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c24
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c182
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h281
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c70
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c299
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h388
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c713
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h81
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c957
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h389
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c451
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h73
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c34
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h1887
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c513
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c605
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c2310
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c4023
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c781
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c19470
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.h36
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c48
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c250
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h107
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h30
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c8
-rw-r--r--drivers/net/wireless/silabs/Kconfig18
-rw-r--r--drivers/net/wireless/silabs/Makefile3
-rw-r--r--drivers/net/wireless/silabs/wfx/Kconfig (renamed from drivers/staging/wfx/Kconfig)0
-rw-r--r--drivers/net/wireless/silabs/wfx/Makefile (renamed from drivers/staging/wfx/Makefile)0
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.c (renamed from drivers/staging/wfx/bh.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.h (renamed from drivers/staging/wfx/bh.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h (renamed from drivers/staging/wfx/bus.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c (renamed from drivers/staging/wfx/bus_sdio.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c (renamed from drivers/staging/wfx/bus_spi.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.c (renamed from drivers/staging/wfx/data_rx.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.h (renamed from drivers/staging/wfx/data_rx.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.c (renamed from drivers/staging/wfx/data_tx.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.h (renamed from drivers/staging/wfx/data_tx.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.c (renamed from drivers/staging/wfx/debug.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.h (renamed from drivers/staging/wfx/debug.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.c (renamed from drivers/staging/wfx/fwio.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.h (renamed from drivers/staging/wfx/fwio.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_cmd.h (renamed from drivers/staging/wfx/hif_api_cmd.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_general.h (renamed from drivers/staging/wfx/hif_api_general.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_mib.h (renamed from drivers/staging/wfx/hif_api_mib.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.c (renamed from drivers/staging/wfx/hif_rx.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.h (renamed from drivers/staging/wfx/hif_rx.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c (renamed from drivers/staging/wfx/hif_tx.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.h (renamed from drivers/staging/wfx/hif_tx.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.c (renamed from drivers/staging/wfx/hif_tx_mib.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.h (renamed from drivers/staging/wfx/hif_tx_mib.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.c (renamed from drivers/staging/wfx/hwio.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.h (renamed from drivers/staging/wfx/hwio.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/key.c (renamed from drivers/staging/wfx/key.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/key.h (renamed from drivers/staging/wfx/key.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c (renamed from drivers/staging/wfx/main.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/main.h (renamed from drivers/staging/wfx/main.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.c (renamed from drivers/staging/wfx/queue.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.h (renamed from drivers/staging/wfx/queue.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.c (renamed from drivers/staging/wfx/scan.c)0
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.h (renamed from drivers/staging/wfx/scan.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c (renamed from drivers/staging/wfx/sta.c)8
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h (renamed from drivers/staging/wfx/sta.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/traces.h (renamed from drivers/staging/wfx/traces.h)0
-rw-r--r--drivers/net/wireless/silabs/wfx/wfx.h (renamed from drivers/staging/wfx/wfx.h)0
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c18
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c52
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c241
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c12
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c18
-rw-r--r--drivers/net/wwan/wwan_hwsim.c22
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/staging/Kconfig1
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/wfx/TODO6
-rw-r--r--fs/proc/proc_sysctl.c4
-rw-r--r--include/linux/bpf-cgroup.h8
-rw-r--r--include/linux/bpf.h231
-rw-r--r--include/linux/bpf_local_storage.h4
-rw-r--r--include/linux/bpf_verifier.h3
-rw-r--r--include/linux/btf.h23
-rw-r--r--include/linux/can/rx-offload.h4
-rw-r--r--include/linux/ethtool.h4
-rw-r--r--include/linux/icmpv6.h11
-rw-r--r--include/linux/ieee802154.h81
-rw-r--r--include/linux/ipv6.h5
-rw-r--r--include/linux/mdio.h70
-rw-r--r--include/linux/mlx5/accel.h156
-rw-r--r--include/linux/mlx5/driver.h3
-rw-r--r--include/linux/mlx5/fs.h12
-rw-r--r--include/linux/mlx5/mlx5_ifc.h18
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h211
-rw-r--r--include/linux/mlx5/port.h2
-rw-r--r--include/linux/netdevice.h161
-rw-r--r--include/linux/phy.h5
-rw-r--r--include/linux/phylink.h6
-rw-r--r--include/linux/ptp_clock_kernel.h4
-rw-r--r--include/linux/qed/qed_nvmetcp_ip_services_if.h29
-rw-r--r--include/linux/rtnetlink.h1
-rw-r--r--include/linux/skbuff.h91
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h131
-rw-r--r--include/linux/socket.h6
-rw-r--r--include/linux/sysctl.h9
-rw-r--r--include/linux/usb/rndis_host.h1
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/net/act_api.h3
-rw-r--r--include/net/cfg802154.h6
-rw-r--r--include/net/devlink.h66
-rw-r--r--include/net/flow_dissector.h9
-rw-r--r--include/net/if_inet6.h8
-rw-r--r--include/net/ip_fib.h4
-rw-r--r--include/net/mac80211.h90
-rw-r--r--include/net/mac802154.h19
-rw-r--r--include/net/mptcp.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h8
-rw-r--r--include/net/page_pool.h21
-rw-r--r--include/net/ping.h4
-rw-r--r--include/net/pkt_cls.h6
-rw-r--r--include/net/route.h36
-rw-r--r--include/net/rtnetlink.h16
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sock.h42
-rw-r--r--include/net/strparser.h4
-rw-r--r--include/net/tc_act/tc_gact.h15
-rw-r--r--include/net/tc_act/tc_skbedit.h13
-rw-r--r--include/net/tcp.h45
-rw-r--r--include/net/tls.h15
-rw-r--r--include/net/udp.h8
-rw-r--r--include/rdma/ib_verbs.h8
-rw-r--r--include/soc/mscc/ocelot.h7
-rw-r--r--include/trace/events/mptcp.h6
-rw-r--r--include/trace/events/skb.h21
-rw-r--r--include/trace/events/tcp.h47
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/linux/atm_zatm.h47
-rw-r--r--include/uapi/linux/bpf.h12
-rw-r--r--include/uapi/linux/btf.h4
-rw-r--r--include/uapi/linux/devlink.h28
-rw-r--r--include/uapi/linux/ethtool.h1
-rw-r--r--include/uapi/linux/ethtool_netlink.h1
-rw-r--r--include/uapi/linux/if_link.h5
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/mdio.h75
-rw-r--r--include/uapi/linux/mptcp.h8
-rw-r--r--include/uapi/linux/neighbour.h2
-rw-r--r--include/uapi/linux/netlink.h1
-rw-r--r--include/uapi/linux/pkt_cls.h2
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tipc_config.h28
-rw-r--r--kernel/bpf/arraymap.c44
-rw-r--r--kernel/bpf/bloom_filter.c6
-rw-r--r--kernel/bpf/bpf_inode_storage.c10
-rw-r--r--kernel/bpf/bpf_iter.c32
-rw-r--r--kernel/bpf/bpf_local_storage.c29
-rw-r--r--kernel/bpf/bpf_struct_ops.c10
-rw-r--r--kernel/bpf/bpf_task_storage.c9
-rw-r--r--kernel/bpf/btf.c634
-rw-r--r--kernel/bpf/cgroup.c106
-rw-r--r--kernel/bpf/cpumap.c6
-rw-r--r--kernel/bpf/devmap.c10
-rw-r--r--kernel/bpf/hashtab.c88
-rw-r--r--kernel/bpf/helpers.c24
-rw-r--r--kernel/bpf/local_storage.c7
-rw-r--r--kernel/bpf/lpm_trie.c6
-rw-r--r--kernel/bpf/map_in_map.c5
-rw-r--r--kernel/bpf/queue_stack_maps.c10
-rw-r--r--kernel/bpf/reuseport_array.c6
-rw-r--r--kernel/bpf/ringbuf.c10
-rw-r--r--kernel/bpf/stackmap.c7
-rw-r--r--kernel/bpf/syscall.c431
-rw-r--r--kernel/bpf/task_iter.c1
-rw-r--r--kernel/bpf/verifier.c507
-rw-r--r--kernel/sysctl.c79
-rw-r--r--kernel/trace/bpf_trace.c11
-rw-r--r--lib/test_bpf.c315
-rw-r--r--lib/test_sysctl.c32
-rw-r--r--net/appletalk/ddp.c3
-rw-r--r--net/atm/common.c4
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/bluetooth/af_bluetooth.c7
-rw-r--r--net/bluetooth/hci_sock.c3
-rw-r--r--net/bpf/test_run.c67
-rw-r--r--net/bridge/br_device.c1
-rw-r--r--net/bridge/br_fdb.c157
-rw-r--r--net/bridge/br_mdb.c12
-rw-r--r--net/bridge/br_netlink.c9
-rw-r--r--net/bridge/br_private.h18
-rw-r--r--net/bridge/br_switchdev.c3
-rw-r--r--net/bridge/br_sysfs_br.c6
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/can/bcm.c7
-rw-r--r--net/can/isotp.c4
-rw-r--r--net/can/j1939/socket.c4
-rw-r--r--net/can/raw.c8
-rw-r--r--net/core/bpf_sk_storage.c11
-rw-r--r--net/core/datagram.c7
-rw-r--r--net/core/datagram.h15
-rw-r--r--net/core/dev.c108
-rw-r--r--net/core/dev.h91
-rw-r--r--net/core/dev_addr_lists.c2
-rw-r--r--net/core/dev_ioctl.c2
-rw-r--r--net/core/devlink.c954
-rw-r--r--net/core/filter.c10
-rw-r--r--net/core/flow_dissector.c20
-rw-r--r--net/core/link_watch.c1
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/net-procfs.c2
-rw-r--r--net/core/net-sysfs.c1
-rw-r--r--net/core/page_pool.c78
-rw-r--r--net/core/rtnetlink.c403
-rw-r--r--net/core/skbuff.c63
-rw-r--r--net/core/sock.c95
-rw-r--r--net/core/sock_map.c10
-rw-r--r--net/core/sysctl_net_core.c15
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/ipv4.c5
-rw-r--r--net/dccp/ipv6.c2
-rw-r--r--net/dccp/proto.c6
-rw-r--r--net/dsa/dsa.c9
-rw-r--r--net/dsa/dsa_priv.h28
-rw-r--r--net/dsa/port.c128
-rw-r--r--net/dsa/slave.c31
-rw-r--r--net/dsa/switch.c188
-rw-r--r--net/dsa/tag_8021q.c10
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ethtool/common.c3
-rw-r--r--net/ethtool/netlink.h2
-rw-r--r--net/ethtool/rings.c54
-rw-r--r--net/ieee802154/socket.c12
-rw-r--r--net/ipv4/Kconfig1
-rw-r--r--net/ipv4/af_inet.c11
-rw-r--r--net/ipv4/arp.c7
-rw-r--r--net/ipv4/datagram.c7
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_rules.c2
-rw-r--r--net/ipv4/fib_semantics.c4
-rw-r--r--net/ipv4/fib_trie.c12
-rw-r--r--net/ipv4/fou.c1
-rw-r--r--net/ipv4/icmp.c77
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/ip_forward.c13
-rw-r--r--net/ipv4/ip_gre.c50
-rw-r--r--net/ipv4/ip_input.c1
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c4
-rw-r--r--net/ipv4/ping.c40
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c51
-rw-r--r--net/ipv4/sysctl_net_ipv4.c16
-rw-r--r--net/ipv4/tcp.c68
-rw-r--r--net/ipv4/tcp_bbr.c20
-rw-r--r--net/ipv4/tcp_bic.c14
-rw-r--r--net/ipv4/tcp_bpf.c15
-rw-r--r--net/ipv4/tcp_cdg.c30
-rw-r--r--net/ipv4/tcp_cong.c30
-rw-r--r--net/ipv4/tcp_cubic.c22
-rw-r--r--net/ipv4/tcp_dctcp.c11
-rw-r--r--net/ipv4/tcp_highspeed.c18
-rw-r--r--net/ipv4/tcp_htcp.c10
-rw-r--r--net/ipv4/tcp_hybla.c18
-rw-r--r--net/ipv4/tcp_illinois.c12
-rw-r--r--net/ipv4/tcp_input.c167
-rw-r--r--net/ipv4/tcp_ipv4.c8
-rw-r--r--net/ipv4/tcp_lp.c6
-rw-r--r--net/ipv4/tcp_metrics.c12
-rw-r--r--net/ipv4/tcp_nv.c24
-rw-r--r--net/ipv4/tcp_output.c30
-rw-r--r--net/ipv4/tcp_rate.c2
-rw-r--r--net/ipv4/tcp_recovery.c15
-rw-r--r--net/ipv4/tcp_scalable.c4
-rw-r--r--net/ipv4/tcp_vegas.c21
-rw-r--r--net/ipv4/tcp_veno.c24
-rw-r--r--net/ipv4/tcp_westwood.c3
-rw-r--r--net/ipv4/tcp_yeah.c30
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv4/udp_bpf.c17
-rw-r--r--net/ipv4/udp_impl.h4
-rw-r--r--net/ipv6/addrconf.c51
-rw-r--r--net/ipv6/af_inet6.c7
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/exthdrs.c44
-rw-r--r--net/ipv6/icmp.c31
-rw-r--r--net/ipv6/ip6_gre.c34
-rw-r--r--net/ipv6/ip6_input.c41
-rw-r--r--net/ipv6/ip6_output.c34
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ndisc.c20
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c4
-rw-r--r--net/ipv6/raw.c6
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/sysctl_net_ipv6.c6
-rw-r--r--net/ipv6/tcp_ipv6.c5
-rw-r--r--net/ipv6/udp.c10
-rw-r--r--net/ipv6/udp_impl.h4
-rw-r--r--net/iucv/af_iucv.c3
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/l2tp/l2tp_ip6.c4
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/agg-rx.c12
-rw-r--r--net/mac80211/agg-tx.c6
-rw-r--r--net/mac80211/airtime.c4
-rw-r--r--net/mac80211/cfg.c33
-rw-r--r--net/mac80211/chan.c8
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/debugfs_sta.c12
-rw-r--r--net/mac80211/eht.c6
-rw-r--r--net/mac80211/ethtool.c4
-rw-r--r--net/mac80211/he.c8
-rw-r--r--net/mac80211/ht.c8
-rw-r--r--net/mac80211/ibss.c26
-rw-r--r--net/mac80211/key.c9
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/mesh_plink.c24
-rw-r--r--net/mac80211/mlme.c18
-rw-r--r--net/mac80211/ocb.c2
-rw-r--r--net/mac80211/rate.c8
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c23
-rw-r--r--net/mac80211/rx.c131
-rw-r--r--net/mac80211/s1g.c4
-rw-r--r--net/mac80211/sta_info.c110
-rw-r--r--net/mac80211/sta_info.h155
-rw-r--r--net/mac80211/status.c41
-rw-r--r--net/mac80211/tdls.c26
-rw-r--r--net/mac80211/trace.h4
-rw-r--r--net/mac80211/tx.c26
-rw-r--r--net/mac80211/vht.c78
-rw-r--r--net/mac802154/cfg.c1
-rw-r--r--net/mac802154/ieee802154_i.h2
-rw-r--r--net/mac802154/main.c54
-rw-r--r--net/mac802154/util.c22
-rw-r--r--net/mctp/af_mctp.c4
-rw-r--r--net/mctp/test/route-test.c8
-rw-r--r--net/mpls/af_mpls.c3
-rw-r--r--net/mptcp/Makefile2
-rw-r--r--net/mptcp/ctrl.c21
-rw-r--r--net/mptcp/mib.c1
-rw-r--r--net/mptcp/mib.h1
-rw-r--r--net/mptcp/mptcp_diag.c105
-rw-r--r--net/mptcp/options.c12
-rw-r--r--net/mptcp/pm.c111
-rw-r--r--net/mptcp/pm_netlink.c266
-rw-r--r--net/mptcp/pm_userspace.c429
-rw-r--r--net/mptcp/protocol.c113
-rw-r--r--net/mptcp/protocol.h87
-rw-r--r--net/mptcp/sockopt.c6
-rw-r--r--net/mptcp/subflow.c77
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/nf_conntrack_bpf.c22
-rw-r--r--net/netfilter/nf_conntrack_ecache.c19
-rw-r--r--net/netfilter/nf_conntrack_netlink.c68
-rw-r--r--net/netfilter/nf_log_syslog.c136
-rw-r--r--net/netfilter/nf_tables_api.c6
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c14
-rw-r--r--net/netfilter/nft_bitwise.c13
-rw-r--r--net/netfilter/nft_fib.c4
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/netrom/af_netrom.c3
-rw-r--r--net/nfc/core.c1
-rw-r--r--net/nfc/llcp_sock.c3
-rw-r--r--net/nfc/rawsock.c3
-rw-r--r--net/packet/af_packet.c22
-rw-r--r--net/phonet/datagram.c4
-rw-r--r--net/phonet/pep.c7
-rw-r--r--net/qrtr/af_qrtr.c3
-rw-r--r--net/rose/af_rose.c3
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/act_csum.c3
-rw-r--r--net/sched/act_ct.c3
-rw-r--r--net/sched/act_gact.c13
-rw-r--r--net/sched/act_gate.c3
-rw-r--r--net/sched/act_mirred.c4
-rw-r--r--net/sched/act_mpls.c10
-rw-r--r--net/sched/act_pedit.c4
-rw-r--r--net/sched/act_police.c20
-rw-r--r--net/sched/act_sample.c3
-rw-r--r--net/sched/act_skbedit.c65
-rw-r--r--net/sched/act_tunnel_key.c4
-rw-r--r--net/sched/act_vlan.c4
-rw-r--r--net/sched/cls_api.c22
-rw-r--r--net/sched/cls_flower.c104
-rw-r--r--net/sched/cls_matchall.c19
-rw-r--r--net/sched/sch_generic.c12
-rw-r--r--net/sctp/ipv6.c4
-rw-r--r--net/sctp/socket.c18
-rw-r--r--net/sctp/ulpevent.c2
-rw-r--r--net/socket.c15
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/tls/tls_device.c6
-rw-r--r--net/tls/tls_sw.c489
-rw-r--r--net/unix/af_unix.c11
-rw-r--r--net/unix/unix_bpf.c5
-rw-r--r--net/vmw_vsock/virtio_transport.c197
-rw-r--r--net/vmw_vsock/vmci_transport.c5
-rw-r--r--net/wireless/nl80211.c1
-rw-r--r--net/x25/af_x25.c3
-rw-r--r--net/xdp/xsk.c4
-rw-r--r--net/xdp/xsk_queue.h4
-rw-r--r--net/xdp/xskmap.c6
-rw-r--r--net/xfrm/espintcp.c4
-rw-r--r--samples/bpf/Makefile10
-rw-r--r--samples/bpf/cpustat_user.c1
-rw-r--r--samples/bpf/hbm.c5
-rw-r--r--samples/bpf/ibumad_user.c1
-rw-r--r--samples/bpf/map_perf_test_user.c1
-rw-r--r--samples/bpf/offwaketime_user.c1
-rw-r--r--samples/bpf/sockex2_user.c1
-rw-r--r--samples/bpf/sockex3_user.c1
-rw-r--r--samples/bpf/spintest_user.c1
-rw-r--r--samples/bpf/syscall_tp_user.c4
-rw-r--r--samples/bpf/task_fd_query_user.c1
-rw-r--r--samples/bpf/test_lru_dist.c1
-rw-r--r--samples/bpf/test_map_in_map_user.c1
-rw-r--r--samples/bpf/test_overhead_user.c1
-rw-r--r--samples/bpf/tracex2_user.c1
-rw-r--r--samples/bpf/tracex3_user.c1
-rw-r--r--samples/bpf/tracex4_user.c1
-rw-r--r--samples/bpf/tracex5_user.c1
-rw-r--r--samples/bpf/tracex6_user.c1
-rw-r--r--samples/bpf/xdp1_user.c3
-rw-r--r--samples/bpf/xdp_adjust_tail_user.c1
-rw-r--r--samples/bpf/xdp_monitor_user.c1
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c1
-rw-r--r--samples/bpf/xdp_redirect_map_multi_user.c1
-rw-r--r--samples/bpf/xdp_redirect_user.c1
-rw-r--r--samples/bpf/xdp_router_ipv4.bpf.c180
-rw-r--r--samples/bpf/xdp_router_ipv4_kern.c186
-rw-r--r--samples/bpf/xdp_router_ipv4_user.c456
-rw-r--r--samples/bpf/xdp_rxq_info_user.c1
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c1
-rw-r--r--samples/bpf/xdp_sample_user.c1
-rw-r--r--samples/bpf/xdp_tx_iptunnel_user.c1
-rw-r--r--samples/bpf/xdpsock_user.c9
-rw-r--r--samples/bpf/xsk_fwd.c7
-rw-r--r--tools/bpf/bpftool/common.c8
-rw-r--r--tools/bpf/bpftool/feature.c4
-rw-r--r--tools/bpf/bpftool/link.c3
-rw-r--r--tools/bpf/bpftool/main.c6
-rw-r--r--tools/bpf/bpftool/main.h2
-rw-r--r--tools/bpf/bpftool/map.c2
-rw-r--r--tools/bpf/bpftool/perf.c112
-rw-r--r--tools/bpf/bpftool/pids.c1
-rw-r--r--tools/bpf/bpftool/prog.c4
-rw-r--r--tools/bpf/bpftool/struct_ops.c2
-rw-r--r--tools/bpf/bpftool/tracelog.c2
-rw-r--r--tools/bpf/runqslower/runqslower.c18
-rw-r--r--tools/include/uapi/asm-generic/socket.h2
-rw-r--r--tools/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--tools/include/uapi/linux/bpf.h12
-rw-r--r--tools/include/uapi/linux/btf.h4
-rw-r--r--tools/lib/bpf/Build3
-rw-r--r--tools/lib/bpf/Makefile2
-rw-r--r--tools/lib/bpf/bpf.c34
-rw-r--r--tools/lib/bpf/bpf_helpers.h7
-rw-r--r--tools/lib/bpf/bpf_tracing.h23
-rw-r--r--tools/lib/bpf/btf.c15
-rw-r--r--tools/lib/bpf/libbpf.c808
-rw-r--r--tools/lib/bpf/libbpf.h123
-rw-r--r--tools/lib/bpf/libbpf.map1
-rw-r--r--tools/lib/bpf/libbpf_internal.h37
-rw-r--r--tools/lib/bpf/relo_core.c104
-rw-r--r--tools/lib/bpf/relo_core.h6
-rw-r--r--tools/lib/bpf/usdt.bpf.h259
-rw-r--r--tools/lib/bpf/usdt.c1518
-rw-r--r--tools/testing/selftests/bpf/Makefile25
-rw-r--r--tools/testing/selftests/bpf/bench.c1
-rw-r--r--tools/testing/selftests/bpf/bpf_rlimit.h28
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c6
-rw-r--r--tools/testing/selftests/bpf/get_cgroup_id_user.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arg_parsing.c107
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c85
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c100
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/for_each.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/helper_restricted.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_btf.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_funcs.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_fixup.c114
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_kptr.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/netcnt.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c56
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c45
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_global_funcs.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_strncmp.c25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c50
-rw-r--r--tools/testing/selftests/bpf/prog_tests/usdt.c421
-rw-r--r--tools/testing/selftests/bpf/progs/exhandler_kern.c15
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c27
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs1.c15
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs2.c15
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr.c190
-rw-r--r--tools/testing/selftests/bpf/progs/perf_event_stackmap.c4
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h5
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h4
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600.c11
-rw-r--r--tools/testing/selftests/bpf/progs/skb_load_bytes.c19
-rw-r--r--tools/testing/selftests/bpf/progs/strncmp_test.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_cookie.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func17.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_helper_restricted.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_log_fixup.c38
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_pt_regs.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c73
-rw-r--r--tools/testing/selftests/bpf/progs/test_urandom_usdt.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt.c96
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt_multispec.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c12
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c2
-rw-r--r--tools/testing/selftests/bpf/sdt-config.h6
-rw-r--r--tools/testing/selftests/bpf/sdt.h513
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_synctypes.py2
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c7
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c4
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c43
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c70
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c487
-rw-r--r--tools/testing/selftests/bpf/test_progs.h64
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c4
-rw-r--r--tools/testing/selftests/bpf/test_sock.c6
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c4
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c5
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c6
-rw-r--r--tools/testing/selftests/bpf/test_tag.c4
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c4
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c1
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c55
-rw-r--r--tools/testing/selftests/bpf/test_verifier_log.c5
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c91
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.h8
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c9
-rw-r--r--tools/testing/selftests/bpf/urandom_read.c63
-rw-r--r--tools/testing/selftests/bpf/urandom_read_aux.c9
-rw-r--r--tools/testing/selftests/bpf/urandom_read_lib1.c13
-rw-r--r--tools/testing/selftests/bpf/urandom_read_lib2.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c20
-rw-r--r--tools/testing/selftests/bpf/verifier/map_kptr.c469
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c6
-rw-r--r--tools/testing/selftests/bpf/xdp_redirect_multi.c1
-rw-r--r--tools/testing/selftests/bpf/xdping.c8
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.c6
l---------tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_mld.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh1
-rw-r--r--tools/testing/selftests/drivers/net/dsa/forwarding.config2
l---------tools/testing/selftests/drivers/net/dsa/lib.sh1
l---------tools/testing/selftests/drivers/net/dsa/local_termination.sh1
l---------tools/testing/selftests/drivers/net/dsa/no_forwarding.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh341
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_burst.sh480
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh5
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh5
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/basic_qos.sh253
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/psfp.sh327
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh24
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh12
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile1
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb.sh103
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/net/forwarding/lib.sh133
-rwxr-xr-xtools/testing/selftests/net/forwarding/local_termination.sh299
-rwxr-xr-xtools/testing/selftests/net/forwarding/no_forwarding.sh261
-rwxr-xr-xtools/testing/selftests/net/forwarding/router.sh18
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_vid_1.sh27
-rw-r--r--tools/testing/selftests/net/forwarding/tsn_lib.sh235
-rw-r--r--tools/testing/selftests/net/mptcp/config8
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh38
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh232
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c645
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh779
-rwxr-xr-xtools/testing/selftests/net/ndisc_unsolicited_na_test.sh255
-rwxr-xr-xtools/testing/selftests/net/vrf_strict_mode_test.sh48
-rwxr-xr-xtools/testing/selftests/netfilter/nft_fib.sh50
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh23
1223 files changed, 102525 insertions, 40535 deletions
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index c07dc0ee860e..9764d6edb189 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -1933,7 +1933,7 @@
...
255= /dev/umem/d15p15 15th partition of 16th board.
- 117 char COSA/SRP synchronous serial card
+ 117 char [REMOVED] COSA/SRP synchronous serial card
0 = /dev/cosa0c0 1st board, 1st channel
1 = /dev/cosa0c1 1st board, 2nd channel
...
diff --git a/Documentation/bpf/libbpf/index.rst b/Documentation/bpf/libbpf/index.rst
index 4e8c656b539a..3722537d1384 100644
--- a/Documentation/bpf/libbpf/index.rst
+++ b/Documentation/bpf/libbpf/index.rst
@@ -6,14 +6,13 @@ libbpf
.. toctree::
:maxdepth: 1
+ API Documentation <https://libbpf.readthedocs.io/en/latest/api.html>
libbpf_naming_convention
libbpf_build
This is documentation for libbpf, a userspace library for loading and
interacting with bpf programs.
-For API documentation see the `versioned API documentation site <https://libbpf.readthedocs.io/en/latest/api.html>`_.
-
All general BPF questions, including kernel functionality, libbpf APIs and
their application, should be sent to bpf@vger.kernel.org mailing list.
You can `subscribe <http://vger.kernel.org/vger-lists.html#bpf>`_ to the
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml
new file mode 100644
index 000000000000..9fbeb626ab23
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek PCIE Mirror Controller for MT7622
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+ - Felix Fietkau <nbd@nbd.name>
+
+description:
+  The MediaTek PCIe mirror provides a configuration interface for the PCIe
+  controller on the MT7622 SoC.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt7622-pcie-mirror
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pcie_mirror: pcie-mirror@10000400 {
+ compatible = "mediatek,mt7622-pcie-mirror", "syscon";
+ reg = <0 0x10000400 0 0x10>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
new file mode 100644
index 000000000000..787d6673f952
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt7622-wed.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek Wireless Ethernet Dispatch Controller for MT7622
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+ - Felix Fietkau <nbd@nbd.name>
+
+description:
+  The MediaTek wireless ethernet dispatch controller can be configured to
+  intercept and handle access to the WLAN DMA queues and PCIe interrupts
+  and to implement hardware flow offloading from ethernet to WLAN.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt7622-wed
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ wed0: wed@1020a000 {
+ compatible = "mediatek,mt7622-wed","syscon";
+ reg = <0 0x1020a000 0 0x1000>;
+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml b/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml
index 1c88820cbcdf..f81eda8cb0a5 100644
--- a/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml
@@ -20,10 +20,14 @@ allOf:
properties:
compatible:
const: aspeed,ast2600-mdio
+
reg:
maxItems: 1
description: The register range of the MDIO controller instance
+ resets:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -34,11 +38,13 @@ unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/clock/ast2600-clock.h>
mdio0: mdio@1e650000 {
compatible = "aspeed,ast2600-mdio";
reg = <0x1e650000 0x8>;
#address-cells = <1>;
#size-cells = <0>;
+ resets = <&syscon ASPEED_RESET_MII>;
ethphy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
diff --git a/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml b/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
new file mode 100644
index 000000000000..fb34d971dcb3
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/ctu,ctucanfd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: CTU CAN FD Open-source IP Core Device Tree Bindings
+
+description: |
+ Open-source CAN FD IP core developed at the Czech Technical University in Prague
+
+  The core sources and documentation are available on the project pages
+  [1] sources : https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core
+  [2] datasheet : https://canbus.pages.fel.cvut.cz/ctucanfd_ip_core/doc/Datasheet.pdf
+
+  Integration in a Xilinx Zynq SoC based system together with
+  OpenCores SJA1000 compatible controllers
+  [3] project : https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top
+  Martin Jerabek's diploma thesis with integration and testing
+  framework description
+  [4] PDF : https://dspace.cvut.cz/bitstream/handle/10467/80366/F3-DP-2019-Jerabek-Martin-Jerabek-thesis-2019-canfd.pdf
+
+maintainers:
+ - Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ - Ondrej Ille <ondrej.ille@gmail.com>
+ - Martin Jerabek <martin.jerabek01@gmail.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: ctu,ctucanfd-2
+ - const: ctu,ctucanfd
+ - const: ctu,ctucanfd
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description: |
+ phandle of reference clock (100 MHz is appropriate
+ for FPGA implementation on Zynq-7000 system).
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ ctu_can_fd_0: can@43c30000 {
+ compatible = "ctu,ctucanfd";
+ interrupts = <0 30 4>;
+ clocks = <&clkc 15>;
+ reg = <0x43c30000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
index b3826af6bd6e..7a73057707b4 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
@@ -5,8 +5,8 @@ $id: http://devicetree.org/schemas/net/can/microchip,mcp251xfd.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title:
- Microchip MCP2517FD and MCP2518FD stand-alone CAN controller device tree
- bindings
+ Microchip MCP2517FD, MCP2518FD and MCP251863 stand-alone CAN
+ controller device tree bindings
maintainers:
- Marc Kleine-Budde <mkl@pengutronix.de>
@@ -17,13 +17,14 @@ allOf:
properties:
compatible:
oneOf:
- - const: microchip,mcp2517fd
- description: for MCP2517FD
- - const: microchip,mcp2518fd
- description: for MCP2518FD
- - const: microchip,mcp251xfd
- description: to autodetect chip variant
-
+ - enum:
+ - microchip,mcp2517fd
+ - microchip,mcp2518fd
+ - microchip,mcp251xfd
+ - items:
+ - enum:
+ - microchip,mcp251863
+ - const: microchip,mcp2518fd
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
index f98c53dc1894..9fc137fafed9 100644
--- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
@@ -23,6 +23,7 @@ properties:
- renesas,r8a774e1-canfd # RZ/G2H
- renesas,r8a7795-canfd # R-Car H3
- renesas,r8a7796-canfd # R-Car M3-W
+ - renesas,r8a77961-canfd # R-Car M3-W+
- renesas,r8a77965-canfd # R-Car M3-N
- renesas,r8a77970-canfd # R-Car V3M
- renesas,r8a77980-canfd # R-Car V3H
@@ -32,6 +33,7 @@ properties:
- items:
- enum:
+ - renesas,r9a07g043-canfd # RZ/G2UL
- renesas,r9a07g044-canfd # RZ/G2{L,LC}
- renesas,r9a07g054-canfd # RZ/V2L
- const: renesas,rzg2l-canfd # RZ/G2L family
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index 6cd3d853dcba..e5b628736930 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -84,13 +84,6 @@ properties:
phys:
maxItems: 1
- phy-names:
- const: sgmii-phy
- description:
- Required with ZynqMP SoC when in SGMII mode.
- Should reference PS-GTR generic PHY device for this controller
- instance. See ZynqMP example.
-
resets:
maxItems: 1
description:
@@ -204,7 +197,6 @@ examples:
reset-names = "gem1_rst";
status = "okay";
phy-mode = "sgmii";
- phy-names = "sgmii-phy";
phys = <&psgtr 1 PHY_TYPE_SGMII 1 1>;
fixed-link {
speed = <1000>;
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index ee42328a109d..ed1415a4381f 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -77,6 +77,15 @@ properties:
description:
Maximum PHY supported speed in Mbits / seconds.
+ phy-10base-t1l-2.4vpp:
+ description: |
+ tristate, request/disable 2.4 Vpp operating mode. The values are:
+ 0: Disable 2.4 Vpp operating mode.
+ 1: Request 2.4 Vpp operating mode from link partner.
+ Absence of this property will leave configuration to default values.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum: [0, 1]
+
broken-turn-around:
$ref: /schemas/types.yaml#/definitions/flag
description:
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
new file mode 100644
index 000000000000..43cc4024ef98
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -0,0 +1,297 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mediatek,net.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Frame Engine Ethernet controller
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+ - Felix Fietkau <nbd@nbd.name>
+
+description:
+ The frame engine ethernet controller can be found on MediaTek SoCs. These SoCs
+ have dual GMAC ports.
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt2701-eth
+ - mediatek,mt7623-eth
+ - mediatek,mt7622-eth
+ - mediatek,mt7629-eth
+ - ralink,rt5350-eth
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 3
+ maxItems: 3
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 3
+
+ reset-names:
+ items:
+ - const: fe
+ - const: gmac
+ - const: ppe
+
+ mediatek,ethsys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon node that handles the port setup.
+
+ cci-control-port: true
+
+ mediatek,hifsys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the mediatek hifsys controller used to provide various clocks
+ and reset to the system.
+
+ mediatek,sgmiisys:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 2
+ items:
+ maxItems: 1
+ description:
+      A list of phandles to the syscon nodes that handle the SGMII setup,
+      which is required for those SoCs equipped with SGMII.
+
+ dma-coherent: true
+
+ mdio-bus:
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+allOf:
+ - $ref: "ethernet-controller.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt2701-eth
+ - mediatek,mt7623-eth
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: ethif
+ - const: esw
+ - const: gp1
+ - const: gp2
+
+ mediatek,pctl:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon node that handles the ports slew rate and
+ driver current.
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: mediatek,mt7622-eth
+ then:
+ properties:
+ clocks:
+ minItems: 11
+ maxItems: 11
+
+ clock-names:
+ items:
+ - const: ethif
+ - const: esw
+ - const: gp0
+ - const: gp1
+ - const: gp2
+ - const: sgmii_tx250m
+ - const: sgmii_rx250m
+ - const: sgmii_cdr_ref
+ - const: sgmii_cdr_fb
+ - const: sgmii_ck
+ - const: eth2pll
+
+ mediatek,sgmiisys:
+ minItems: 1
+ maxItems: 1
+
+ mediatek,wed:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 2
+ maxItems: 2
+ items:
+ maxItems: 1
+ description:
+ List of phandles to wireless ethernet dispatch nodes.
+
+ mediatek,pcie-mirror:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the mediatek pcie-mirror controller.
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: mediatek,mt7629-eth
+ then:
+ properties:
+ clocks:
+ minItems: 17
+ maxItems: 17
+
+ clock-names:
+ items:
+ - const: ethif
+ - const: sgmiitop
+ - const: esw
+ - const: gp0
+ - const: gp1
+ - const: gp2
+ - const: fe
+ - const: sgmii_tx250m
+ - const: sgmii_rx250m
+ - const: sgmii_cdr_ref
+ - const: sgmii_cdr_fb
+ - const: sgmii2_tx250m
+ - const: sgmii2_rx250m
+ - const: sgmii2_cdr_ref
+ - const: sgmii2_cdr_fb
+ - const: sgmii_ck
+ - const: eth2pll
+
+ mediatek,infracfg:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon node that handles the path from GMAC to
+ PHY variants.
+
+ mediatek,sgmiisys:
+ minItems: 2
+ maxItems: 2
+
+patternProperties:
+ "^mac@[0-1]$":
+ type: object
+ additionalProperties: false
+ allOf:
+ - $ref: ethernet-controller.yaml#
+ description:
+ Ethernet MAC node
+ properties:
+ compatible:
+ const: mediatek,eth-mac
+
+ reg:
+ maxItems: 1
+
+ phy-handle: true
+
+ phy-mode: true
+
+ required:
+ - reg
+ - compatible
+ - phy-handle
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - mediatek,ethsys
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/mt7622-clk.h>
+ #include <dt-bindings/power/mt7622-power.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ethernet: ethernet@1b100000 {
+ compatible = "mediatek,mt7622-eth";
+ reg = <0 0x1b100000 0 0x20000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 225 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_ETH_SEL>,
+ <&ethsys CLK_ETH_ESW_EN>,
+ <&ethsys CLK_ETH_GP0_EN>,
+ <&ethsys CLK_ETH_GP1_EN>,
+ <&ethsys CLK_ETH_GP2_EN>,
+ <&sgmiisys CLK_SGMII_TX250M_EN>,
+ <&sgmiisys CLK_SGMII_RX250M_EN>,
+ <&sgmiisys CLK_SGMII_CDR_REF>,
+ <&sgmiisys CLK_SGMII_CDR_FB>,
+ <&topckgen CLK_TOP_SGMIIPLL>,
+ <&apmixedsys CLK_APMIXED_ETH2PLL>;
+ clock-names = "ethif", "esw", "gp0", "gp1", "gp2",
+ "sgmii_tx250m", "sgmii_rx250m",
+ "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck",
+ "eth2pll";
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
+ mediatek,ethsys = <&ethsys>;
+ mediatek,sgmiisys = <&sgmiisys>;
+ cci-control-port = <&cci_control2>;
+ mediatek,pcie-mirror = <&pcie_mirror>;
+ mediatek,hifsys = <&hifsys>;
+ dma-coherent;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio0: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ reg = <0>;
+ };
+
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
deleted file mode 100644
index 72d03e07cf7c..000000000000
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-MediaTek Frame Engine Ethernet controller
-=========================================
-
-The frame engine ethernet controller can be found on MediaTek SoCs. These SoCs
-have dual GMAC each represented by a child node..
-
-* Ethernet controller node
-
-Required properties:
-- compatible: Should be
- "mediatek,mt2701-eth": for MT2701 SoC
- "mediatek,mt7623-eth", "mediatek,mt2701-eth": for MT7623 SoC
- "mediatek,mt7622-eth": for MT7622 SoC
- "mediatek,mt7629-eth": for MT7629 SoC
- "ralink,rt5350-eth": for Ralink Rt5350F and MT7628/88 SoC
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the three frame engines interrupts in numeric
- order. These are fe_int0, fe_int1 and fe_int2.
-- clocks: the clock used by the core
-- clock-names: the names of the clock listed in the clocks property. These are
- "ethif", "esw", "gp2", "gp1" : For MT2701 and MT7623 SoC
- "ethif", "esw", "gp0", "gp1", "gp2", "sgmii_tx250m", "sgmii_rx250m",
- "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" : For MT7622 SoC
- "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "sgmii_tx250m",
- "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii2_tx250m",
- "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb", "sgmii_ck",
- "eth2pll" : For MT7629 SoC.
-- power-domains: phandle to the power domain that the ethernet is part of
-- resets: Should contain phandles to the ethsys reset signals
-- reset-names: Should contain the names of reset signal listed in the resets
- property
- These are "fe", "gmac" and "ppe"
-- mediatek,ethsys: phandle to the syscon node that handles the port setup
-- mediatek,infracfg: phandle to the syscon node that handles the path from
- GMAC to PHY variants, which is required for MT7629 SoC.
-- mediatek,sgmiisys: a list of phandles to the syscon node that handles the
- SGMII setup which is required for those SoCs equipped with SGMII such
- as MT7622 and MT7629 SoC. And MT7622 have only one set of SGMII shared
- by GMAC1 and GMAC2; MT7629 have two independent sets of SGMII directed
- to GMAC1 and GMAC2, respectively.
-- mediatek,pctl: phandle to the syscon node that handles the ports slew rate
- and driver current: only for MT2701 and MT7623 SoC
-
-* Ethernet MAC node
-
-Required properties:
-- compatible: Should be "mediatek,eth-mac"
-- reg: The number of the MAC
-- phy-handle: see ethernet.txt file in the same directory and
- the phy-mode "trgmii" required being provided when reg
- is equal to 0 and the MAC uses fixed-link to connect
- with internal switch such as MT7530.
-
-Example:
-
-eth: ethernet@1b100000 {
- compatible = "mediatek,mt7623-eth";
- reg = <0 0x1b100000 0 0x20000>;
- clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
- <&ethsys CLK_ETHSYS_ESW>,
- <&ethsys CLK_ETHSYS_GP2>,
- <&ethsys CLK_ETHSYS_GP1>;
- clock-names = "ethif", "esw", "gp2", "gp1";
- interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
- GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
- GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
- resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
- reset-names = "eth";
- mediatek,ethsys = <&ethsys>;
- mediatek,pctl = <&syscfg_pctl_a>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- gmac1: mac@0 {
- compatible = "mediatek,eth-mac";
- reg = <0>;
- phy-handle = <&phy0>;
- };
-
- gmac2: mac@1 {
- compatible = "mediatek,eth-mac";
- reg = <1>;
- phy-handle = <&phy1>;
- };
-
- mdio-bus {
- phy0: ethernet-phy@0 {
- reg = <0>;
- phy-mode = "rgmii";
- };
-
- phy1: ethernet-phy@1 {
- reg = <1>;
- phy-mode = "rgmii";
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/micrel.txt b/Documentation/devicetree/bindings/net/micrel.txt
index 8d157f0295a5..a9ed691ffb03 100644
--- a/Documentation/devicetree/bindings/net/micrel.txt
+++ b/Documentation/devicetree/bindings/net/micrel.txt
@@ -45,3 +45,12 @@ Optional properties:
In fiber mode, auto-negotiation is disabled and the PHY can only work in
100base-fx (full and half duplex) modes.
+
+ - coma-mode-gpios: If present, the given GPIO will be deasserted when the
+ PHY is probed.
+
+ Some PHYs have a COMA mode input pin which puts the PHY into
+ isolate and power-down mode. On some boards this input is connected
+ to a GPIO of the SoC.
+
+ Supported on the LAN8814.
diff --git a/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml b/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml
index 13812768b923..dc116f14750e 100644
--- a/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml
+++ b/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml
@@ -39,6 +39,7 @@ properties:
- description: frame dma based extraction
- description: analyzer interrupt
- description: ptp interrupt
+ - description: ptp external interrupt
interrupt-names:
minItems: 1
@@ -47,16 +48,15 @@ properties:
- const: fdma
- const: ana
- const: ptp
+ - const: ptp-ext
resets:
items:
- description: Reset controller used for switch core reset (soft reset)
- - description: Reset controller used for releasing the phy from reset
reset-names:
items:
- const: switch
- - const: phy
ethernet-ports:
type: object
@@ -145,8 +145,8 @@ examples:
reg-names = "cpu", "gcb";
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "xtr";
- resets = <&switch_reset 0>, <&phy_reset 0>;
- reset-names = "switch", "phy";
+ resets = <&switch_reset 0>;
+ reset-names = "switch";
ethernet-ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/mscc,miim.yaml b/Documentation/devicetree/bindings/net/mscc,miim.yaml
new file mode 100644
index 000000000000..2c451cfa4e0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mscc,miim.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mscc,miim.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microsemi MII Management Controller (MIIM)
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+ - $ref: "mdio.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - mscc,ocelot-miim
+ - microchip,lan966x-miim
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ reg:
+ items:
+ - description: base address
+ - description: associated reset register for internal PHYs
+ minItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency: true
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio@107009c {
+ compatible = "mscc,ocelot-miim";
+ reg = <0x107009c 0x36>, <0x10700f0 0x8>;
+ interrupts = <14>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/mscc-miim.txt b/Documentation/devicetree/bindings/net/mscc-miim.txt
deleted file mode 100644
index 70e0cb1ee485..000000000000
--- a/Documentation/devicetree/bindings/net/mscc-miim.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Microsemi MII Management Controller (MIIM) / MDIO
-=================================================
-
-Properties:
-- compatible: must be "mscc,ocelot-miim" or "microchip,lan966x-miim"
-- reg: The base address of the MDIO bus controller register bank. Optionally, a
- second register bank can be defined if there is an associated reset register
- for internal PHYs
-- #address-cells: Must be <1>.
-- #size-cells: Must be <0>. MDIO addresses have no size component.
-- interrupts: interrupt specifier (refer to the interrupt binding)
-
-Typically an MDIO bus might have several children.
-
-Example:
- mdio@107009c {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "mscc,ocelot-miim";
- reg = <0x107009c 0x36>, <0x10700f0 0x8>;
- interrupts = <14>;
-
- phy0: ethernet-phy@0 {
- reg = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index cdf7b873b419..6b32caa8311c 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -20,120 +20,17 @@ properties:
enum:
- qcom,ipq8074-wifi
- qcom,ipq6018-wifi
+ - qcom,wcn6750-wifi
reg:
maxItems: 1
interrupts:
- items:
- - description: misc-pulse1 interrupt events
- - description: misc-latch interrupt events
- - description: sw exception interrupt events
- - description: watchdog interrupt events
- - description: interrupt event for ring CE0
- - description: interrupt event for ring CE1
- - description: interrupt event for ring CE2
- - description: interrupt event for ring CE3
- - description: interrupt event for ring CE4
- - description: interrupt event for ring CE5
- - description: interrupt event for ring CE6
- - description: interrupt event for ring CE7
- - description: interrupt event for ring CE8
- - description: interrupt event for ring CE9
- - description: interrupt event for ring CE10
- - description: interrupt event for ring CE11
- - description: interrupt event for ring host2wbm-desc-feed
- - description: interrupt event for ring host2reo-re-injection
- - description: interrupt event for ring host2reo-command
- - description: interrupt event for ring host2rxdma-monitor-ring3
- - description: interrupt event for ring host2rxdma-monitor-ring2
- - description: interrupt event for ring host2rxdma-monitor-ring1
- - description: interrupt event for ring reo2ost-exception
- - description: interrupt event for ring wbm2host-rx-release
- - description: interrupt event for ring reo2host-status
- - description: interrupt event for ring reo2host-destination-ring4
- - description: interrupt event for ring reo2host-destination-ring3
- - description: interrupt event for ring reo2host-destination-ring2
- - description: interrupt event for ring reo2host-destination-ring1
- - description: interrupt event for ring rxdma2host-monitor-destination-mac3
- - description: interrupt event for ring rxdma2host-monitor-destination-mac2
- - description: interrupt event for ring rxdma2host-monitor-destination-mac1
- - description: interrupt event for ring ppdu-end-interrupts-mac3
- - description: interrupt event for ring ppdu-end-interrupts-mac2
- - description: interrupt event for ring ppdu-end-interrupts-mac1
- - description: interrupt event for ring rxdma2host-monitor-status-ring-mac3
- - description: interrupt event for ring rxdma2host-monitor-status-ring-mac2
- - description: interrupt event for ring rxdma2host-monitor-status-ring-mac1
- - description: interrupt event for ring host2rxdma-host-buf-ring-mac3
- - description: interrupt event for ring host2rxdma-host-buf-ring-mac2
- - description: interrupt event for ring host2rxdma-host-buf-ring-mac1
- - description: interrupt event for ring rxdma2host-destination-ring-mac3
- - description: interrupt event for ring rxdma2host-destination-ring-mac2
- - description: interrupt event for ring rxdma2host-destination-ring-mac1
- - description: interrupt event for ring host2tcl-input-ring4
- - description: interrupt event for ring host2tcl-input-ring3
- - description: interrupt event for ring host2tcl-input-ring2
- - description: interrupt event for ring host2tcl-input-ring1
- - description: interrupt event for ring wbm2host-tx-completions-ring3
- - description: interrupt event for ring wbm2host-tx-completions-ring2
- - description: interrupt event for ring wbm2host-tx-completions-ring1
- - description: interrupt event for ring tcl2host-status-ring
-
+ minItems: 32
+ maxItems: 52
interrupt-names:
- items:
- - const: misc-pulse1
- - const: misc-latch
- - const: sw-exception
- - const: watchdog
- - const: ce0
- - const: ce1
- - const: ce2
- - const: ce3
- - const: ce4
- - const: ce5
- - const: ce6
- - const: ce7
- - const: ce8
- - const: ce9
- - const: ce10
- - const: ce11
- - const: host2wbm-desc-feed
- - const: host2reo-re-injection
- - const: host2reo-command
- - const: host2rxdma-monitor-ring3
- - const: host2rxdma-monitor-ring2
- - const: host2rxdma-monitor-ring1
- - const: reo2ost-exception
- - const: wbm2host-rx-release
- - const: reo2host-status
- - const: reo2host-destination-ring4
- - const: reo2host-destination-ring3
- - const: reo2host-destination-ring2
- - const: reo2host-destination-ring1
- - const: rxdma2host-monitor-destination-mac3
- - const: rxdma2host-monitor-destination-mac2
- - const: rxdma2host-monitor-destination-mac1
- - const: ppdu-end-interrupts-mac3
- - const: ppdu-end-interrupts-mac2
- - const: ppdu-end-interrupts-mac1
- - const: rxdma2host-monitor-status-ring-mac3
- - const: rxdma2host-monitor-status-ring-mac2
- - const: rxdma2host-monitor-status-ring-mac1
- - const: host2rxdma-host-buf-ring-mac3
- - const: host2rxdma-host-buf-ring-mac2
- - const: host2rxdma-host-buf-ring-mac1
- - const: rxdma2host-destination-ring-mac3
- - const: rxdma2host-destination-ring-mac2
- - const: rxdma2host-destination-ring-mac1
- - const: host2tcl-input-ring4
- - const: host2tcl-input-ring3
- - const: host2tcl-input-ring2
- - const: host2tcl-input-ring1
- - const: wbm2host-tx-completions-ring3
- - const: wbm2host-tx-completions-ring2
- - const: wbm2host-tx-completions-ring1
- - const: tcl2host-status-ring
+ maxItems: 52
qcom,rproc:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -151,20 +48,205 @@ properties:
board-2.bin for designs with colliding bus and device specific ids
memory-region:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
description:
phandle to a node describing reserved memory (System RAM memory)
used by ath11k firmware (see bindings/reserved-memory/reserved-memory.txt)
+ iommus:
+ minItems: 1
+ maxItems: 2
+
+ wifi-firmware:
+ type: object
+ description: |
+      The WCN6750 wifi node can contain one optional firmware subnode.
+      The firmware subnode is needed when the platform does not have TrustZone.
+ required:
+ - iommus
+
required:
- compatible
- reg
- interrupts
- - interrupt-names
- qcom,rproc
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq8074-wifi
+ - qcom,ipq6018-wifi
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: misc-pulse1 interrupt events
+ - description: misc-latch interrupt events
+ - description: sw exception interrupt events
+ - description: watchdog interrupt events
+ - description: interrupt event for ring CE0
+ - description: interrupt event for ring CE1
+ - description: interrupt event for ring CE2
+ - description: interrupt event for ring CE3
+ - description: interrupt event for ring CE4
+ - description: interrupt event for ring CE5
+ - description: interrupt event for ring CE6
+ - description: interrupt event for ring CE7
+ - description: interrupt event for ring CE8
+ - description: interrupt event for ring CE9
+ - description: interrupt event for ring CE10
+ - description: interrupt event for ring CE11
+ - description: interrupt event for ring host2wbm-desc-feed
+ - description: interrupt event for ring host2reo-re-injection
+ - description: interrupt event for ring host2reo-command
+ - description: interrupt event for ring host2rxdma-monitor-ring3
+ - description: interrupt event for ring host2rxdma-monitor-ring2
+ - description: interrupt event for ring host2rxdma-monitor-ring1
+ - description: interrupt event for ring reo2ost-exception
+ - description: interrupt event for ring wbm2host-rx-release
+ - description: interrupt event for ring reo2host-status
+ - description: interrupt event for ring reo2host-destination-ring4
+ - description: interrupt event for ring reo2host-destination-ring3
+ - description: interrupt event for ring reo2host-destination-ring2
+ - description: interrupt event for ring reo2host-destination-ring1
+ - description: interrupt event for ring rxdma2host-monitor-destination-mac3
+ - description: interrupt event for ring rxdma2host-monitor-destination-mac2
+ - description: interrupt event for ring rxdma2host-monitor-destination-mac1
+ - description: interrupt event for ring ppdu-end-interrupts-mac3
+ - description: interrupt event for ring ppdu-end-interrupts-mac2
+ - description: interrupt event for ring ppdu-end-interrupts-mac1
+ - description: interrupt event for ring rxdma2host-monitor-status-ring-mac3
+ - description: interrupt event for ring rxdma2host-monitor-status-ring-mac2
+ - description: interrupt event for ring rxdma2host-monitor-status-ring-mac1
+ - description: interrupt event for ring host2rxdma-host-buf-ring-mac3
+ - description: interrupt event for ring host2rxdma-host-buf-ring-mac2
+ - description: interrupt event for ring host2rxdma-host-buf-ring-mac1
+ - description: interrupt event for ring rxdma2host-destination-ring-mac3
+ - description: interrupt event for ring rxdma2host-destination-ring-mac2
+ - description: interrupt event for ring rxdma2host-destination-ring-mac1
+ - description: interrupt event for ring host2tcl-input-ring4
+ - description: interrupt event for ring host2tcl-input-ring3
+ - description: interrupt event for ring host2tcl-input-ring2
+ - description: interrupt event for ring host2tcl-input-ring1
+ - description: interrupt event for ring wbm2host-tx-completions-ring3
+ - description: interrupt event for ring wbm2host-tx-completions-ring2
+ - description: interrupt event for ring wbm2host-tx-completions-ring1
+ - description: interrupt event for ring tcl2host-status-ring
+ interrupt-names:
+ items:
+ - const: misc-pulse1
+ - const: misc-latch
+ - const: sw-exception
+ - const: watchdog
+ - const: ce0
+ - const: ce1
+ - const: ce2
+ - const: ce3
+ - const: ce4
+ - const: ce5
+ - const: ce6
+ - const: ce7
+ - const: ce8
+ - const: ce9
+ - const: ce10
+ - const: ce11
+ - const: host2wbm-desc-feed
+ - const: host2reo-re-injection
+ - const: host2reo-command
+ - const: host2rxdma-monitor-ring3
+ - const: host2rxdma-monitor-ring2
+ - const: host2rxdma-monitor-ring1
+ - const: reo2ost-exception
+ - const: wbm2host-rx-release
+ - const: reo2host-status
+ - const: reo2host-destination-ring4
+ - const: reo2host-destination-ring3
+ - const: reo2host-destination-ring2
+ - const: reo2host-destination-ring1
+ - const: rxdma2host-monitor-destination-mac3
+ - const: rxdma2host-monitor-destination-mac2
+ - const: rxdma2host-monitor-destination-mac1
+ - const: ppdu-end-interrupts-mac3
+ - const: ppdu-end-interrupts-mac2
+ - const: ppdu-end-interrupts-mac1
+ - const: rxdma2host-monitor-status-ring-mac3
+ - const: rxdma2host-monitor-status-ring-mac2
+ - const: rxdma2host-monitor-status-ring-mac1
+ - const: host2rxdma-host-buf-ring-mac3
+ - const: host2rxdma-host-buf-ring-mac2
+ - const: host2rxdma-host-buf-ring-mac1
+ - const: rxdma2host-destination-ring-mac3
+ - const: rxdma2host-destination-ring-mac2
+ - const: rxdma2host-destination-ring-mac1
+ - const: host2tcl-input-ring4
+ - const: host2tcl-input-ring3
+ - const: host2tcl-input-ring2
+ - const: host2tcl-input-ring1
+ - const: wbm2host-tx-completions-ring3
+ - const: wbm2host-tx-completions-ring2
+ - const: wbm2host-tx-completions-ring1
+ - const: tcl2host-status-ring
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq8074-wifi
+ - qcom,ipq6018-wifi
+ then:
+ required:
+ - interrupt-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,wcn6750-wifi
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: interrupt event for ring CE1
+ - description: interrupt event for ring CE2
+ - description: interrupt event for ring CE3
+ - description: interrupt event for ring CE4
+ - description: interrupt event for ring CE5
+ - description: interrupt event for ring CE6
+ - description: interrupt event for ring CE7
+ - description: interrupt event for ring CE8
+ - description: interrupt event for ring CE9
+ - description: interrupt event for ring CE10
+ - description: interrupt event for ring DP1
+ - description: interrupt event for ring DP2
+ - description: interrupt event for ring DP3
+ - description: interrupt event for ring DP4
+ - description: interrupt event for ring DP5
+ - description: interrupt event for ring DP6
+ - description: interrupt event for ring DP7
+ - description: interrupt event for ring DP8
+ - description: interrupt event for ring DP9
+ - description: interrupt event for ring DP10
+ - description: interrupt event for ring DP11
+ - description: interrupt event for ring DP12
+ - description: interrupt event for ring DP13
+ - description: interrupt event for ring DP14
+ - description: interrupt event for ring DP15
+ - description: interrupt event for ring DP16
+ - description: interrupt event for ring DP17
+ - description: interrupt event for ring DP18
+ - description: interrupt event for ring DP19
+ - description: interrupt event for ring DP20
+ - description: interrupt event for ring DP21
+ - description: interrupt event for ring DP22
+
examples:
- |
@@ -309,3 +391,64 @@ examples:
};
};
};
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ wlan_ce_mem: memory@4cd000 {
+ no-map;
+ reg = <0x0 0x004cd000 0x0 0x1000>;
+ };
+
+ wlan_fw_mem: memory@80c00000 {
+ no-map;
+ reg = <0x0 0x80c00000 0x0 0xc00000>;
+ };
+ };
+
+ wifi: wifi@17a10040 {
+ compatible = "qcom,wcn6750-wifi";
+ reg = <0x17a10040 0x0>;
+ iommus = <&apps_smmu 0x1c00 0x1>;
+ interrupts = <GIC_SPI 768 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 769 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 770 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 771 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 772 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 773 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 774 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 775 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 776 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 777 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 778 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 779 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 780 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 781 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 782 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 783 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 784 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 785 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 786 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 787 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 788 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 789 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 790 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 791 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 792 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 793 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 794 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 795 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 796 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 797 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 798 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 799 IRQ_TYPE_EDGE_RISING>;
+ qcom,rproc = <&remoteproc_wpss>;
+ memory-region = <&wlan_fw_mem>, <&wlan_ce_mem>;
+ wifi-firmware {
+ iommus = <&apps_smmu 0x1c02 0x1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
index 105725a127ab..f5a531738d93 100644
--- a/Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
@@ -3,7 +3,7 @@
%YAML 1.2
---
-$id: http://devicetree.org/schemas/staging/net/wireless/silabs,wfx.yaml#
+$id: http://devicetree.org/schemas/net/wireless/silabs,wfx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Silicon Labs WFxxx devicetree bindings
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 01430973ecec..e12a75e10456 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -283,6 +283,8 @@ patternProperties:
description: Shenzen Chuangsiqi Technology Co.,Ltd.
"^ctera,.*":
description: CTERA Networks Intl.
+ "^ctu,.*":
+ description: Czech Technical University in Prague
"^cubietech,.*":
description: Cubietech, Ltd.
"^cui,.*":
diff --git a/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
new file mode 100644
index 000000000000..2fde5551e756
--- /dev/null
+++ b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
@@ -0,0 +1,639 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+CTU CAN FD Driver
+=================
+
+Author: Martin Jerabek <martin.jerabek01@gmail.com>
+
+
+About CTU CAN FD IP Core
+------------------------
+
+`CTU CAN FD <https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core>`_
+is an open source soft core written in VHDL.
+It originated in 2015 as Ondrej Ille's project
+at the `Department of Measurement <https://meas.fel.cvut.cz/>`_
+of `FEE <http://www.fel.cvut.cz/en/>`_ at `CTU <https://www.cvut.cz/en>`_.
+
+The SocketCAN driver has been developed for the Xilinx Zynq SoC based
+MicroZed board
+(`Vivado integration <https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top>`_)
+and the Intel Cyclone V 5CSEMA4U23C6 based DE0-Nano-SoC Terasic board
+(`QSys integration <https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd>`_),
+together with support for
+`PCIe integration <https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd>`_ of the core.
+
+In the case of Zynq, the core is connected via the APB system bus, which does
+not have enumeration support, and the device must be specified in Device Tree.
+This kind of device is called a platform device in the kernel and is
+handled by a platform device driver.
+
+The basic functional model of the CTU CAN FD peripheral has been
+accepted into QEMU mainline. See QEMU `CAN emulation support <https://www.qemu.org/docs/master/system/devices/can.html>`_
+for CAN FD buses, host connection and CTU CAN FD core emulation. The development
+version of emulation support can be cloned from ctu-canfd branch of QEMU local
+development `repository <https://gitlab.fel.cvut.cz/canbus/qemu-canbus>`_.
+
+
+About SocketCAN
+---------------
+
+SocketCAN is a standard common interface for CAN devices in the Linux
+kernel. As the name suggests, the bus is accessed via sockets, similarly
+to common network devices. The reasoning behind this is described in depth
+in `Linux SocketCAN <https://www.kernel.org/doc/html/latest/networking/can.html>`_.
+In short, it offers a
+natural way to implement and work with higher layer protocols over CAN,
+in the same way as, e.g., UDP/IP over Ethernet.
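+
+As a minimal illustration of that parallel, the following user-space
+sketch sends a single classic CAN frame through a ``CAN_RAW`` socket.
+It is only a sketch: error handling is omitted, and the interface name
+and frame contents are arbitrary examples.
+
+.. code:: c
+
+   #include <linux/can.h>
+   #include <linux/can/raw.h>
+   #include <net/if.h>
+   #include <string.h>
+   #include <sys/ioctl.h>
+   #include <sys/socket.h>
+   #include <unistd.h>
+
+   /* Send one classic CAN frame on the given interface, e.g. "can0". */
+   int send_one_frame(const char *ifname)
+   {
+           struct sockaddr_can addr = { .can_family = AF_CAN };
+           struct can_frame frame = { .can_id = 0x123, .len = 2,
+                                      .data = { 0xde, 0xad } };
+           struct ifreq ifr = { 0 };
+           int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+
+           strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+           ioctl(s, SIOCGIFINDEX, &ifr);        /* resolve name to ifindex */
+           addr.can_ifindex = ifr.ifr_ifindex;
+
+           bind(s, (struct sockaddr *)&addr, sizeof(addr));
+           write(s, &frame, sizeof(frame));     /* hand the frame to the driver */
+           return close(s);
+   }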
+
+Device probe
+~~~~~~~~~~~~
+
+Before going into detail about the structure of a CAN bus device driver,
+let's reiterate how the kernel gets to know about the device at all.
+Some buses, like PCI or PCIe, support device enumeration. That is, when
+the system boots, it discovers all the devices on the bus and reads
+their configuration. The kernel identifies the device via its vendor ID
+and device ID, and if there is a driver registered for this identifier
+combination, its probe method is invoked to populate the driver's
+instance for the given hardware. The situation is similar with USB, except
+that it also allows for device hot-plug.
+
+The situation is different for peripherals which are directly embedded
+in the SoC and connected to an internal system bus (AXI, APB, Avalon,
+and others). These buses do not support enumeration, and thus the kernel
+has to learn about the devices from elsewhere. This is exactly what the
+Device Tree was made for.
+
+Device tree
+~~~~~~~~~~~
+
+An entry in device tree states that a device exists in the system, how
+it is reachable (on which bus it resides) and its configuration –
+register addresses, interrupts and so on. An example of such a device
+tree entry is given below.
+
+.. code:: raw
+
+ / {
+ /* ... */
+ amba: amba {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+
+ CTU_CAN_FD_0: CTU_CAN_FD@43c30000 {
+ compatible = "ctu,ctucanfd";
+ interrupt-parent = <&intc>;
+ interrupts = <0 30 4>;
+ clocks = <&clkc 15>;
+ reg = <0x43c30000 0x10000>;
+ };
+ };
+ };
+
+
+.. _sec:socketcan:drv:
+
+Driver structure
+~~~~~~~~~~~~~~~~
+
+The driver can be divided into two parts – platform-dependent device
+discovery and set up, and platform-independent CAN network device
+implementation.
+
+.. _sec:socketcan:platdev:
+
+Platform device driver
+^^^^^^^^^^^^^^^^^^^^^^
+
+In the case of Zynq, the core is connected via the AXI system bus, which
+does not have enumeration support, and the device must be specified in
+Device Tree. This kind of device is called a *platform device* in the
+kernel and is handled by a *platform device driver*\ [1]_.
+
+A platform device driver provides the following things:
+
+- A *probe* function
+
+- A *remove* function
+
+- A table of *compatible* devices that the driver can handle
+
+The *probe* function is called exactly once when the device appears (or
+the driver is loaded, whichever happens later). If there are more
+devices handled by the same driver, the *probe* function is called for
+each one of them. Its role is to allocate and initialize resources
+required for handling the device, as well as set up low-level functions
+for the platform-independent layer, e.g., *read_reg* and *write_reg*.
+After that, the driver registers the device to a higher layer, in our
+case as a *network device*.
+
+The *remove* function is called when the device disappears, or the
+driver is about to be unloaded. It serves to free the resources
+allocated in *probe* and to unregister the device from higher layers.
+
+Finally, the table of *compatible* devices states which devices the
+driver can handle. The Device Tree entry ``compatible`` is matched
+against the tables of all *platform drivers*.
+
+.. code:: c
+
+ /* Match table for OF platform binding */
+ static const struct of_device_id ctucan_of_match[] = {
+ { .compatible = "ctu,canfd-2", },
+ { .compatible = "ctu,ctucanfd", },
+ { /* end of list */ },
+ };
+ MODULE_DEVICE_TABLE(of, ctucan_of_match);
+
+ static int ctucan_probe(struct platform_device *pdev);
+ static int ctucan_remove(struct platform_device *pdev);
+
+ static struct platform_driver ctucanfd_driver = {
+ .probe = ctucan_probe,
+ .remove = ctucan_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ctucan_of_match,
+ },
+ };
+ module_platform_driver(ctucanfd_driver);
+
+
+.. _sec:socketcan:netdev:
+
+Network device driver
+^^^^^^^^^^^^^^^^^^^^^
+
+Each network device must support at least these operations:
+
+- Bring the device up: ``ndo_open``
+
+- Bring the device down: ``ndo_close``
+
+- Submit TX frames to the device: ``ndo_start_xmit``
+
+- Signal TX completion and errors to the network subsystem: ISR
+
+- Submit RX frames to the network subsystem: ISR and NAPI
+
+There are two possible event sources: the device and the network
+subsystem. Device events are usually signaled via an interrupt, handled
+in an Interrupt Service Routine (ISR). Handlers for the events
+originating in the network subsystem are then specified in
+``struct net_device_ops``.
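+
+A minimal sketch of how these handlers can be wired together is shown
+below. The handler names are illustrative and not necessarily the
+driver's exact symbols; only the ``net_device_ops`` fields and
+``can_change_mtu()`` are standard kernel API.
+
+.. code:: c
+
+   /* Illustrative only: hooking the callbacks into net_device_ops. */
+   static const struct net_device_ops ctucan_netdev_ops = {
+           .ndo_open       = ctucan_open,        /* bring the interface up */
+           .ndo_stop       = ctucan_close,       /* bring the interface down */
+           .ndo_start_xmit = ctucan_start_xmit,  /* enqueue one TX frame */
+           .ndo_change_mtu = can_change_mtu,     /* generic CAN MTU helper */
+   };
+
+   /* Assigned in probe before the network device is registered: */
+   /*     ndev->netdev_ops = &ctucan_netdev_ops; */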
+
+When the device is brought up, e.g., by calling ``ip link set can0 up``,
+the driver’s function ``ndo_open`` is called. It should validate the
+interface configuration and configure and enable the device. The
+analogous opposite is ``ndo_close``, called when the device is being
+brought down, be it explicitly or implicitly.
+
+When the system should transmit a frame, it does so by calling
+``ndo_start_xmit``, which enqueues the frame into the device. If the
+device HW queue (FIFO, mailboxes or whatever the implementation is)
+becomes full, the ``ndo_start_xmit`` implementation informs the network
+subsystem that it should stop the TX queue (via ``netif_stop_queue``).
+It is then re-enabled later in the ISR when the device has some space
+available again and is able to enqueue another frame.
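+
+The flow-control part of this can be sketched as follows. This is a
+simplified illustration, not the driver's literal code;
+``hw_tx_buffers_full()`` is a hypothetical helper standing in for
+whatever full-buffer check the hardware provides.
+
+.. code:: c
+
+   /* Simplified TX path; writing the frame into the HW buffer is omitted. */
+   static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb,
+                                        struct net_device *ndev)
+   {
+           /* ... insert the frame into a free HW TX buffer here ... */
+
+           if (hw_tx_buffers_full(ndev))     /* hypothetical helper */
+                   netif_stop_queue(ndev);   /* no room left: pause SW queue */
+
+           return NETDEV_TX_OK;
+   }
+
+   /* Later, in the ISR, once a HW TX buffer becomes free again: */
+   /*     if (netif_queue_stopped(ndev))  */
+   /*             netif_wake_queue(ndev); */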
+
+All the device events are handled in the ISR, namely:
+
+#. **TX completion**. When the device successfully finishes transmitting
+ a frame, the frame is echoed locally. On error, an informative error
+ frame [2]_ is sent to the network subsystem instead. In both cases,
+ the software TX queue is resumed so that more frames may be sent.
+
+#. **Error condition**. If something goes wrong (e.g., the device goes
+ bus-off or RX overrun happens), error counters are updated, and
+ informative error frames are enqueued to SW RX queue.
+
+#. **RX buffer not empty**. In this case, read the RX frames and enqueue
+   them to the SW RX queue. Usually NAPI is used as a middle layer
+   (see the NAPI section below).
+
+.. _sec:socketcan:napi:
+
+NAPI
+~~~~
+
+The frequency of incoming frames can be high and the overhead to invoke
+the interrupt service routine for each frame can cause significant
+system load. There are multiple mechanisms in the Linux kernel to deal
+with this situation. They evolved over the years of Linux kernel
+development and enhancements. For network devices, the current standard
+is NAPI – *the New API*. It is similar to classical top-half/bottom-half
+interrupt handling in that it only acknowledges the interrupt in the ISR
+and signals that the rest of the processing should be done in softirq
+context. On top of that, it offers the possibility to *poll* for new
+frames for a while. This has the potential to avoid the costly round of
+enabling interrupts, handling an incoming IRQ in the ISR, re-enabling the
+softirq and switching context back to the softirq.
+
+More detailed documentation of NAPI may be found on the pages of Linux
+Foundation `<https://wiki.linuxfoundation.org/networking/napi>`_.
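+
+As an illustration, the ISR/poll split for reception could look like the
+sketch below. It is simplified: ``ctucan_read_rx_frame()`` is a
+hypothetical helper standing in for the actual FIFO read and ``skb``
+hand-off, and ``priv->napi`` is assumed to be the NAPI context embedded
+in the driver-private structure.
+
+.. code:: c
+
+   /* ISR: mask further RX interrupts and defer the work to NAPI. */
+   static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
+   {
+           struct net_device *ndev = dev_id;
+           struct ctucan_priv *priv = netdev_priv(ndev);
+
+           /* ... acknowledge and mask RX interrupts in the hardware ... */
+           napi_schedule(&priv->napi);
+           return IRQ_HANDLED;
+   }
+
+   /* NAPI poll: fetch up to 'budget' frames in softirq context. */
+   static int ctucan_rx_poll(struct napi_struct *napi, int budget)
+   {
+           int work_done = 0;
+
+           while (work_done < budget && ctucan_read_rx_frame(napi->dev))
+                   work_done++;
+
+           if (work_done < budget) {
+                   napi_complete_done(napi, work_done);
+                   /* ... re-enable RX interrupts in the hardware ... */
+           }
+           return work_done;
+   }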
+
+Integrating the core to Xilinx Zynq
+-----------------------------------
+
+The core implements a simple subset of the Avalon bus
+(search for Intel **Avalon Interface Specifications**),
+as it was originally used on
+Altera FPGA chips, yet Xilinx natively interfaces with AXI
+(search for ARM **AMBA AXI and ACE Protocol Specification AXI3,
+AXI4, and AXI4-Lite, ACE and ACE-Lite**).
+The most obvious solution would be to use
+an Avalon/AXI bridge or implement some simple conversion entity.
+However, the core’s interface is half-duplex with no handshake
+signaling, whereas AXI is full duplex with two-way signaling. Moreover,
+even the AXI-Lite slave interface is quite resource-intensive, and the
+flexibility and speed of AXI are not required for a CAN core.
+
+Thus a much simpler bus was chosen – APB (Advanced Peripheral Bus)
+(search for ARM **AMBA APB Protocol Specification**).
+APB-AXI bridge is directly available in
+Xilinx Vivado, and the interface adaptor entity is just a few simple
+combinatorial assignments.
+
+Finally, to be able to include the core in a block diagram as a custom
+IP, the core, together with the APB interface, has been packaged as a
+Vivado component.
+
+CTU CAN FD Driver design
+------------------------
+
+The general structure of a CAN device driver has already been examined
+above. The next paragraphs provide a more detailed description of the CTU
+CAN FD core driver in particular.
+
+Low-level driver
+~~~~~~~~~~~~~~~~
+
+The core is not intended to be used solely with SocketCAN, and thus it
+is desirable to have an OS-independent low-level driver. This low-level
+driver can then be used in implementations of OS driver or directly
+either on bare metal or in a user-space application. Another advantage
+is that if the hardware slightly changes, only the low-level driver
+needs to be modified.
+
+The code [3]_ is in part automatically generated and in part written
+manually by the core author, with contributions of the thesis’ author.
+The low-level driver supports operations such as: set bit timing, set
+controller mode, enable/disable, read RX frame, write TX frame, and so
+on.
+
+Configuring bit timing
+~~~~~~~~~~~~~~~~~~~~~~
+
+On CAN, each bit is divided into four segments: SYNC, PROP, PHASE1, and
+PHASE2. Their duration is expressed in multiples of a Time Quantum
+(details in `CAN Specification, Version 2.0 <http://esd.cs.ucr.edu/webres/can20.pdf>`_, chapter 8).
+When configuring
+bitrate, the durations of all the segments (and time quantum) must be
+computed from the bitrate and Sample Point. This is performed
+independently for both the Nominal bitrate and Data bitrate for CAN FD.
+
+SocketCAN is fairly flexible and offers either highly customized
+configuration by setting all the segment durations manually, or a
+convenient configuration by setting just the bitrate and sample point
+(and even that is chosen automatically per Bosch recommendation if not
+specified). However, each CAN controller may have a different base clock
+frequency and a different width of the segment duration registers. The
+algorithm thus needs the minimum and maximum values for the durations
+(and clock prescaler) and tries to optimize the numbers to fit both the
+constraints and the requested parameters.
+
+.. code:: c
+
+ struct can_bittiming_const {
+ char name[16]; /* Name of the CAN controller hardware */
+ __u32 tseg1_min; /* Time segment 1 = prop_seg + phase_seg1 */
+ __u32 tseg1_max;
+ __u32 tseg2_min; /* Time segment 2 = phase_seg2 */
+ __u32 tseg2_max;
+ __u32 sjw_max; /* Synchronisation jump width */
+ __u32 brp_min; /* Bit-rate prescaler */
+ __u32 brp_max;
+ __u32 brp_inc;
+ };
+
+
+
+A curious reader will notice that the durations of the segments PROP_SEG
+and PHASE_SEG1 are not determined separately but rather combined and
+then, by default, the resulting TSEG1 is evenly divided between PROP_SEG
+and PHASE_SEG1. In practice, this has virtually no consequences as the
+sample point is between PHASE_SEG1 and PHASE_SEG2. In CTU CAN FD,
+however, the duration registers ``PROP`` and ``PH1`` have different
+widths (6 and 7 bits, respectively), so the auto-computed values might
+overflow the shorter register and must thus be redistributed among the
+two [4]_.
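+
+A minimal sketch of such a redistribution, assuming the 6-bit ``PROP`` and
+7-bit ``PH1`` register widths mentioned above (the function name is
+illustrative; the real code lives in the low-level driver functions named
+in the footnote):
+
+.. code:: c
+
+    /* Illustrative redistribution of TSEG1 between PROP and PH1 -- a sketch only. */
+    #define PROP_MAX    63    /* 6-bit PROP duration register */
+    #define PH1_MAX     127   /* 7-bit PH1 duration register */
+
+    static void redistribute_tseg1(u32 prop_seg, u32 phase_seg1,
+                                   u32 *prop_out, u32 *ph1_out)
+    {
+        u32 tseg1 = prop_seg + phase_seg1;   /* only the sum is constrained */
+
+        if (prop_seg > PROP_MAX) {
+            /* Move the overflowing part of PROP into PH1. */
+            *prop_out = PROP_MAX;
+            *ph1_out = tseg1 - PROP_MAX;
+        } else {
+            *prop_out = prop_seg;
+            *ph1_out = phase_seg1;
+        }
+        /* A complete implementation would also check *ph1_out against PH1_MAX. */
+    }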
+
+Handling RX
+~~~~~~~~~~~
+
+Frame reception is handled in the NAPI queue, which is enabled from the
+ISR when the RXNE (RX FIFO Not Empty) bit is set. Frames are read one by
+one until either no frame is left in the RX FIFO or the maximum work
+quota has been reached for the NAPI poll run (see the NAPI section
+above). Each frame is then passed to the network interface RX queue.
+
+An incoming frame may be either a CAN 2.0 frame or a CAN FD frame. The
+way to distinguish between these two in the kernel is to allocate either
+``struct can_frame`` or ``struct canfd_frame``, the two having different
+sizes. In the controller, the information about the frame type is stored
+in the first word of RX FIFO.
+
+This brings us a chicken-and-egg problem: we want to allocate the ``skb``
+for the frame, and only if it succeeds, fetch the frame from the FIFO;
+otherwise keep it there for later. But to be able to allocate the
+correct ``skb``, we have to fetch the first word of the FIFO. There are
+several possible solutions:
+
+#. Read the word, then allocate. If it fails, discard the rest of the
+ frame. When the system is low on memory, the situation is bad anyway.
+
+#. Always allocate ``skb`` big enough for an FD frame beforehand. Then
+ tweak the ``skb`` internals to look like it has been allocated for
+ the smaller CAN 2.0 frame.
+
+#. Add option to peek into the FIFO instead of consuming the word.
+
+#. If the allocation fails, store the read word into driver’s data. On
+ the next try, use the stored word instead of reading it again.
+
+Option 1 is simple enough, but not very satisfying if we could do
+better. Option 2 is not acceptable, as it would require modifying the
+private state of an integral kernel structure. The slightly higher
+memory consumption is just a virtual cherry on top of the “cake”. Option
+3 requires non-trivial HW changes and is not ideal from the HW point of
+view.
+
+Option 4 seems like a good compromise, with its disadvantage being that
+a partial frame may stay in the FIFO for a prolonged time. Nonetheless,
+there may be just one owner of the RX FIFO, and thus no one else should
+see the partial frame (disregarding some exotic debugging scenarios).
+Besides, the driver resets the core on its initialization, so the
+partial frame cannot be “adopted” either. In the end, option 4 was
+selected [5]_.
+
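+A hedged sketch of option 4 follows: the already-read first word is kept in
+the driver state when the ``skb`` allocation fails and reused on the next
+attempt. The ``my_can_*`` helpers, ``ffw_is_fd_frame()`` and the state
+structure are illustrative; only ``alloc_can_skb()``/``alloc_canfd_skb()``
+are the standard kernel helpers.
+
+.. code:: c
+
+    /* Illustrative sketch of option 4 -- not the actual driver code. */
+    struct my_can_rx_state {
+        bool have_ffw;   /* the frame format word was already read out */
+        u32 ffw;         /* the stored first word of the frame */
+    };
+
+    static int my_can_read_rx_frame(struct net_device *ndev,
+                                    struct my_can_rx_state *st)
+    {
+        struct canfd_frame *cf;
+        struct sk_buff *skb;
+        u32 ffw;
+
+        /* Reuse the stored word if the previous allocation failed. */
+        ffw = st->have_ffw ? st->ffw : my_can_read_rx_word(ndev);
+
+        if (ffw_is_fd_frame(ffw))
+            skb = alloc_canfd_skb(ndev, &cf);
+        else
+            skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+        if (!skb) {
+            st->have_ffw = true;   /* keep the word, try again later */
+            st->ffw = ffw;
+            return -ENOMEM;
+        }
+        st->have_ffw = false;
+
+        /* ... read the rest of the frame from the FIFO into cf ... */
+        netif_receive_skb(skb);
+        return 0;
+    }
+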
+.. _subsec:ctucanfd:rxtimestamp:
+
+Timestamping RX frames
+^^^^^^^^^^^^^^^^^^^^^^
+
+The CTU CAN FD core reports the exact timestamp when the frame has been
+received. The timestamp is by default captured at the sample point of
+the last bit of EOF but is configurable to be captured at the SOF bit.
+The timestamp source is external to the core and may be up to 64 bits
+wide. At the time of writing, passing the timestamp from kernel to
+userspace is not yet implemented, but is planned in the future.
+
+Handling TX
+~~~~~~~~~~~
+
+The CTU CAN FD core has 4 independent TX buffers, each with its own
+state and priority. When the core wants to transmit, a TX buffer in
+Ready state with the highest priority is selected.
+
+The priorities are 3-bit numbers in the TX_PRIORITY register
+(nibble-aligned). This should be flexible enough for most use cases.
+SocketCAN, however, supports only one FIFO queue for outgoing
+frames [6]_. The buffer priorities may be used to simulate the FIFO
+behavior by assigning each buffer a distinct priority and *rotating* the
+priorities after a frame transmission is completed.
+
+In addition to priority rotation, the SW must maintain head and tail
+pointers into the FIFO formed by the TX buffers to be able to determine
+which buffer should be used for the next frame (``txb_head``) and which
+should be the first completed one (``txb_tail``). The actual buffer
+indices are (obviously) modulo 4 (number of TX buffers), but the
+pointers must be at least one bit wider to be able to distinguish
+between FIFO full and FIFO empty – in this situation,
+:math:`txb\_head \equiv txb\_tail\ (\textrm{mod}\ 4)`. An example of how
+the FIFO is maintained, together with priority rotation, is depicted in
+the tables below.
+
+|
+
++------+---+---+---+---+
+| TXB# | 0 | 1 | 2 | 3 |
++======+===+===+===+===+
+| Seq | A | B | C | |
++------+---+---+---+---+
+| Prio | 7 | 6 | 5 | 4 |
++------+---+---+---+---+
+| | | T | | H |
++------+---+---+---+---+
+
+|
+
++------+---+---+---+---+
+| TXB# | 0 | 1 | 2 | 3 |
++======+===+===+===+===+
+| Seq | | B | C | |
++------+---+---+---+---+
+| Prio | 4 | 7 | 6 | 5 |
++------+---+---+---+---+
+| | | T | | H |
++------+---+---+---+---+
+
+|
+
++------+---+---+---+---+----+
+| TXB# | 0 | 1 | 2 | 3 | 0’ |
++======+===+===+===+===+====+
+| Seq | E | B | C | D | |
++------+---+---+---+---+----+
+| Prio | 4 | 7 | 6 | 5 | |
++------+---+---+---+---+----+
+| | | T | | | H |
++------+---+---+---+---+----+
+
+|
+
+.. figure:: fsm_txt_buffer_user.svg
+
+ TX Buffer states with possible transitions
+
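+The head/tail bookkeeping and priority rotation described above can be
+sketched as follows. This is a simplification under the stated assumptions
+(4 TX buffers, counters wider than 2 bits); ``my_can_rotate_txb_prio()`` and
+``struct my_can_priv`` are illustrative, and the real driver also has to
+deal with locking and the abort states shown in the figure.
+
+.. code:: c
+
+    /* Illustrative TX FIFO bookkeeping over 4 TX buffers -- a sketch only. */
+    #define NTXBUFS 4
+
+    struct my_can_tx_fifo {
+        u32 txb_head;   /* next buffer to fill */
+        u32 txb_tail;   /* oldest buffer still in flight */
+    };
+
+    static bool txb_fifo_full(const struct my_can_tx_fifo *f)
+    {
+        /* head and tail are equal modulo 4 both when empty and when full;
+         * the extra counter bits tell the two cases apart.
+         */
+        return f->txb_head - f->txb_tail >= NTXBUFS;
+    }
+
+    static u32 txb_index(u32 ptr)
+    {
+        return ptr % NTXBUFS;   /* actual hardware buffer number */
+    }
+
+    static void txb_completed(struct my_can_priv *priv, struct my_can_tx_fifo *f)
+    {
+        /* Rotate the buffer priorities so that the remaining buffers keep
+         * their FIFO order and the freed buffer becomes the newest slot.
+         */
+        my_can_rotate_txb_prio(priv);
+        f->txb_tail++;
+    }
+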
+.. _subsec:ctucanfd:txtimestamp:
+
+Timestamping TX frames
+^^^^^^^^^^^^^^^^^^^^^^
+
+When submitting a frame to a TX buffer, one may specify the timestamp at
+which the frame should be transmitted. The frame transmission may start
+later, but not sooner. Note that the timestamp does not participate in
+buffer prioritization – that is decided solely by the mechanism
+described above.
+
+Support for time-based packet transmission was recently merged into Linux
+v4.19 (`Time-based packet transmission <https://lwn.net/Articles/748879/>`_),
+but it remains to be researched
+whether this functionality will be practical for CAN.
+
+Similarly to RX frames, the core also supports retrieving the timestamp
+of TX frames – that is, the time when the frame was successfully
+delivered. The particulars are very similar to timestamping RX frames
+and are described in the section on RX timestamping above.
+
+Handling RX buffer overrun
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a received frame no longer fits into the hardware RX FIFO in its
+entirety, the RX FIFO overrun flag (STATUS[DOR]) is set and the Data Overrun
+Interrupt (DOI) is triggered. When servicing the interrupt, care must be
+taken first to clear the DOR flag (via COMMAND[CDO]) and after that
+clear the DOI interrupt flag. Otherwise, the interrupt would be
+immediately [7]_ rearmed.
+
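+A sketch of the required ordering when servicing the interrupt is shown
+below; the register and bit names follow the description above, but the
+``my_can_*`` helpers and ``MY_CAN_*`` constants are illustrative.
+
+.. code:: c
+
+    /* Illustrative DOI service routine -- the ordering is what matters. */
+    static void my_can_handle_rx_overrun(struct net_device *ndev)
+    {
+        struct my_can_priv *priv = netdev_priv(ndev);
+
+        ndev->stats.rx_over_errors++;
+        ndev->stats.rx_errors++;
+
+        /* 1. Clear the Data Overrun flag first (COMMAND[CDO]). */
+        my_can_write_reg(priv, MY_CAN_REG_COMMAND, MY_CAN_COMMAND_CDO);
+
+        /* 2. Only then clear the DOI interrupt flag; in the opposite order
+         *    the still-set DOR flag would immediately rearm the interrupt.
+         */
+        my_can_write_reg(priv, MY_CAN_REG_INT_STAT, MY_CAN_INT_DOI);
+    }
+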
+**Note**: During development, it was discussed whether the internal HW
+pipelining could disrupt this clear sequence and whether an additional
+dummy cycle is necessary between clearing the flag and the interrupt. On
+the Avalon interface, it indeed proved to be the case, but APB is safe
+because it uses 2-cycle transactions. Essentially, the DOR flag
+would be cleared, but the DOI register’s Preset input would still be high
+in the cycle when the DOI clear request would also be applied (by setting
+the register’s Reset input high). As Set had higher priority than Reset,
+the DOI flag would not be reset. This has been already fixed by swapping
+the Set/Reset priority (see issue #187).
+
+Reporting Error Passive and Bus Off conditions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It may be desirable to report when the node reaches *Error Passive*,
+*Error Warning*, and *Bus Off* conditions. The driver is notified about
+an error state change by an interrupt (EPI, EWLI) and then proceeds to
+determine the core’s error state by reading its error counters.
+
+There is, however, a slight race condition here – there is a delay
+between the time when the state transition occurs (and the interrupt is
+triggered) and when the error counters are read. When EPI is received,
+the node may be either *Error Passive* or *Bus Off*. If the node goes
+*Bus Off*, it obviously remains in the state until it is reset.
+Otherwise, the node is *or was* *Error Passive*. However, it may happen
+that the read state is *Error Warning* or even *Error Active*. It may be
+unclear whether and what exactly to report in that case, but I
+personally entertain the idea that the past error condition should still
+be reported. Similarly, when EWLI is received but the state is later
+detected to be *Error Passive*, *Error Passive* should be reported.
+
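+A sketch of how the detected state might be derived from the error counters
+and reported through SocketCAN is given below. The thresholds 96 and 128 are
+the standard CAN error-warning and error-passive limits; the ``my_can_*``
+helpers are illustrative, while ``struct can_berr_counter`` and the
+``CAN_STATE_*`` values are the standard kernel definitions.
+
+.. code:: c
+
+    /* Illustrative error-state reporting -- a sketch, not the driver itself. */
+    static void my_can_handle_state_irq(struct net_device *ndev)
+    {
+        struct my_can_priv *priv = netdev_priv(ndev);   /* embeds struct can_priv as 'can' */
+        struct can_berr_counter bec;
+        enum can_state state;
+
+        my_can_read_err_counters(priv, &bec);   /* read TEC/REC from the core */
+
+        if (my_can_is_bus_off(priv))
+            state = CAN_STATE_BUS_OFF;
+        else if (bec.txerr >= 128 || bec.rxerr >= 128)
+            state = CAN_STATE_ERROR_PASSIVE;
+        else if (bec.txerr >= 96 || bec.rxerr >= 96)
+            state = CAN_STATE_ERROR_WARNING;
+        else
+            state = CAN_STATE_ERROR_ACTIVE;
+
+        if (state != priv->can.state) {
+            /* Report the change, e.g. via an error frame with CAN_ERR_CRTL set. */
+            my_can_report_state_change(ndev, state);
+            priv->can.state = state;
+        }
+    }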
+
+CTU CAN FD Driver Sources Reference
+-----------------------------------
+
+.. kernel-doc:: drivers/net/can/ctucanfd/ctucanfd.h
+ :internal:
+
+.. kernel-doc:: drivers/net/can/ctucanfd/ctucanfd_base.c
+ :internal:
+
+.. kernel-doc:: drivers/net/can/ctucanfd/ctucanfd_pci.c
+ :internal:
+
+.. kernel-doc:: drivers/net/can/ctucanfd/ctucanfd_platform.c
+ :internal:
+
+CTU CAN FD IP Core and Driver Development Acknowledgment
+---------------------------------------------------------
+
+* Ondrej Ille <ondrej.ille@gmail.com>
+
+ * started the project as a student at the Department of Measurement, FEE, CTU
+ * invested a great amount of personal time and enthusiasm in the project over the years
+ * worked on further funded tasks
+
+* `Department of Measurement <https://meas.fel.cvut.cz/>`_,
+ `Faculty of Electrical Engineering <http://www.fel.cvut.cz/en/>`_,
+ `Czech Technical University <https://www.cvut.cz/en>`_
+
+ * is the main investor into the project over many years
+ * uses project in their CAN/CAN FD diagnostics framework for `Skoda Auto <https://www.skoda-auto.cz/>`_
+
+* `Digiteq Automotive <https://www.digiteqautomotive.com/en>`_
+
+ * funding of the project CAN FD Open Cores Support Linux Kernel Based Systems
+ * negotiated and paid CTU to allow public access to the project
+ * provided additional funding of the work
+
+* `Department of Control Engineering <https://control.fel.cvut.cz/en>`_,
+ `Faculty of Electrical Engineering <http://www.fel.cvut.cz/en/>`_,
+ `Czech Technical University <https://www.cvut.cz/en>`_
+
+ * solving the project CAN FD Open Cores Support Linux Kernel Based Systems
+ * providing GitLab management
+ * virtual servers and computational power for continuous integration
+ * providing hardware for HIL continuous integration tests
+
+* `PiKRON Ltd. <http://pikron.com/>`_
+
+ * minor funding to initiate preparation of the project open-sourcing
+
+* Petr Porazil <porazil@pikron.com>
+
+ * design of PCIe transceiver addon board and assembly of boards
+ * design and assembly of MZ_APO baseboard for MicroZed/Zynq based system
+
+* Martin Jerabek <martin.jerabek01@gmail.com>
+
+ * Linux driver development
+ * continuous integration platform architect and GHDL updates
+ * thesis `Open-source and Open-hardware CAN FD Protocol Support <https://dspace.cvut.cz/bitstream/handle/10467/80366/F3-DP-2019-Jerabek-Martin-Jerabek-thesis-2019-canfd.pdf>`_
+
+* Jiri Novak <jnovak@fel.cvut.cz>
+
+ * project initiation, management and use at Department of Measurement, FEE, CTU
+
+* Pavel Pisa <pisa@cmp.felk.cvut.cz>
+
+ * initiate open-sourcing, project coordination, management at Department of Control Engineering, FEE, CTU
+
+* Jaroslav Beran <jara.beran@gmail.com>
+
+ * system integration for Intel SoC, core and driver testing and updates
+
+* Carsten Emde (`OSADL <https://www.osadl.org/>`_)
+
+ * provided OSADL expertise to discuss IP core licensing
+ * pointed out a possible deadlock between the LGPL and a possible CAN bus patent case, which led to relicensing the IP core design under a BSD-like license
+
+* Reiner Zitzmann and Holger Zeltwanger (`CAN in Automation <https://www.can-cia.org/>`_)
+
+ * provided suggestions and help to inform community about the project and invited us to events focused on CAN bus future development directions
+
+* Jan Charvat
+
+ * implemented CTU CAN FD functional model for QEMU which has been integrated into QEMU mainline (`docs/system/devices/can.rst <https://www.qemu.org/docs/master/system/devices/can.html>`_)
+ * Bachelor's thesis Model of CAN FD Communication Controller for QEMU Emulator
+
+Notes
+-----
+
+
+.. [1]
+ Other buses have their own specific driver interface to set up the
+ device.
+
+.. [2]
+ Not to be mistaken with CAN Error Frame. This is a ``can_frame`` with
+ ``CAN_ERR_FLAG`` set and some error info in its ``data`` field.
+
+.. [3]
+ Available in CTU CAN FD repository
+ `<https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core>`_
+
+.. [4]
+ As is done in the low-level driver functions
+ ``ctucan_hw_set_nom_bittiming`` and
+ ``ctucan_hw_set_data_bittiming``.
+
+.. [5]
+ At the time of writing this thesis, option 1 is still being used and
+ the modification is queued in gitlab issue #222
+
+.. [6]
+ Strictly speaking, multiple CAN TX queues are supported since v4.19
+ `can: enable multi-queue for SocketCAN devices <https://lore.kernel.org/patchwork/patch/913526/>`_ but no mainline driver is using
+ them yet.
+
+.. [7]
+ Or rather in the next clock cycle
diff --git a/Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg b/Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg
new file mode 100644
index 000000000000..b371650788f4
--- /dev/null
+++ b/Documentation/networking/device_drivers/can/ctu/fsm_txt_buffer_user.svg
@@ -0,0 +1,151 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg width="113.611mm" height="86.6873mm" version="1.1" viewBox="0 0 113.611 86.6873" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
+ <defs>
+ <marker id="marker3667" overflow="visible" orient="auto">
+ <path transform="scale(-.6)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill="#28a4ff" fill-rule="evenodd" stroke="#28a4ff" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker3517" overflow="visible" orient="auto">
+ <path transform="scale(-.6)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill-rule="evenodd" stroke="#000" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker3373" overflow="visible" orient="auto">
+ <path transform="scale(-.6)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill-rule="evenodd" stroke="#000" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker3199" overflow="visible" orient="auto">
+ <path transform="scale(-.6)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill="#28a4ff" fill-rule="evenodd" stroke="#28a4ff" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker3037" overflow="visible" orient="auto">
+ <path transform="scale(-.6)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill="#28a4ff" fill-rule="evenodd" stroke="#28a4ff" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker2779" overflow="visible" orient="auto">
+ <path transform="scale(-.6)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill="#28a4ff" fill-rule="evenodd" stroke="#28a4ff" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker2477" overflow="visible" orient="auto">
+ <path transform="scale(.6) rotate(180) translate(0)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill="#28a4ff" fill-rule="evenodd" stroke="#28a4ff" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker2074" overflow="visible" orient="auto">
+ <path transform="scale(.6) rotate(180) translate(0)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill-rule="evenodd" stroke="#000" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker1964" overflow="visible" orient="auto">
+ <path transform="scale(.6) rotate(180) translate(0)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill-rule="evenodd" stroke="#000" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="marker1856" overflow="visible" orient="auto">
+ <path transform="scale(.6) rotate(180) translate(0)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill-rule="evenodd" stroke="#000" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <marker id="Arrow2Mend" overflow="visible" orient="auto">
+ <path transform="scale(.6) rotate(180) translate(0)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill-rule="evenodd" stroke="#000" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <filter id="filter1204" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <marker id="marker2074-3" overflow="visible" orient="auto">
+ <path transform="scale(-.6)" d="m8.71859 4.03374-10.9259-4.01772 10.9259-4.01772c-1.7455 2.37206-1.73544 5.61745-6e-7 8.03544z" fill="#28a4ff" fill-rule="evenodd" stroke="#28a4ff" stroke-linejoin="round" stroke-width=".625"/>
+ </marker>
+ <filter id="filter1204-6" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <filter id="filter1204-6-9" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <filter id="filter1204-6-2" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <filter id="filter1204-6-2-9" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <filter id="filter1204-6-2-9-4" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <filter id="filter1204-6-2-9-1" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <filter id="filter1204-6-2-9-1-3" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ <filter id="filter1204-6-2-9-1-3-1" x="-4.19953e-6" y="-5.60084e-6" width="1.00001" height="1.00001" color-interpolation-filters="sRGB">
+ <feGaussianBlur stdDeviation="0.00018829868"/>
+ </filter>
+ </defs>
+ <metadata>
+ <rdf:RDF>
+ <cc:Work rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/>
+ <dc:title/>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g transform="translate(-49.0277 -104.823)">
+ <g>
+ <path d="m130.534 165.429h-71.1816v-17.5315" fill="none" marker-end="url(#marker2477)" stroke="#28a4ff" stroke-width=".6"/>
+ <path d="m145.034 122.959v-11.5914h-43.1215" fill="none" marker-end="url(#marker3037)" stroke="#28a4ff" stroke-width=".6"/>
+ <rect x="130.679" y="122.933" width="28.2965" height="45.2319" rx="0" ry="0" fill="#e5e5e5" stroke="#717171" stroke-linecap="square" stroke-width=".499999"/>
+ <path d="m102.044 116.236h23.3126l-0.13388 18.8185h19.9383v3.66603" fill="none" marker-end="url(#marker3199)" stroke="#28a4ff" stroke-width=".6"/>
+ <path d="m59.5006 138.391v-24.2517h20.6338" fill="none" marker-end="url(#marker2779)" stroke="#28a4ff" stroke-width=".6"/>
+ <rect x="78.1389" y="126.411" width="28.0037" height="35.0443" rx="0" ry="0" fill="#e5e5e5" stroke="#717171" stroke-linecap="square" stroke-width=".5"/>
+ </g>
+ <g fill="#ffcb35" stroke="#000" stroke-linecap="square">
+ <ellipse cx="92.1408" cy="114.239" rx="10.8866" ry="4.39308" stroke-width=".5"/>
+ <ellipse cx="92.1408" cy="134.185" rx="10.8866" ry="4.39308" stroke-width=".499999"/>
+ <ellipse cx="92.1408" cy="152.199" rx="10.8866" ry="4.39308" stroke-width=".499999"/>
+ </g>
+ <g fill="#28a4ff" stroke="#000" stroke-linecap="square" stroke-width=".499999">
+ <ellipse cx="144.827" cy="143.316" rx="10.8866" ry="4.39308"/>
+ <ellipse cx="144.827" cy="159.143" rx="10.8866" ry="4.39308"/>
+ <ellipse cx="59.4364" cy="142.823" rx="7.36455" ry="4.39308"/>
+ <ellipse cx="144.827" cy="129.196" rx="10.8866" ry="4.39308"/>
+ <ellipse cx="143.077" cy="180.53" rx="10.8866" ry="4.39308"/>
+ </g>
+ <ellipse cx="110.386" cy="180.53" rx="10.8866" ry="4.39308" fill="#ffcb35" stroke="#000" stroke-linecap="square" stroke-width=".499999"/>
+ <text x="110.90907" y="179.42688" font-size="3.175px" xml:space="preserve"><tspan x="110.90907" y="179.42688" dy="0.60000002" text-align="center" text-anchor="middle">Accessible</tspan><tspan x="110.90907" y="183.39563"><tspan font-size="3.175px" text-align="center" text-anchor="middle">for S</tspan>W</tspan></text>
+ <text x="143.5869" y="179.52795" xml:space="preserve"><tspan x="143.5869" y="179.52795" dy="1 0 0 0 0 0" font-family="sans-serif" font-size="2.82222px" text-align="center" text-anchor="middle" style="font-variant-caps:normal;font-variant-east-asian:normal;font-variant-ligatures:normal;font-variant-numeric:normal">Inaccessible</tspan><tspan x="143.5869" y="183.36786" font-size="3.175px"><tspan font-size="3.175px" text-align="center" text-anchor="middle">for S</tspan>W</tspan></text>
+ <g font-size="3.175px">
+ <text x="91.95018" y="115.29005" xml:space="preserve"><tspan x="91.95018" y="115.29005" font-size="3.175px"><tspan font-size="3.175px" text-align="center" text-anchor="middle">Ready</tspan></tspan></text>
+ <text x="145.25127" y="130.49019" xml:space="preserve"><tspan x="145.25127" y="130.49019" font-size="3.175px"><tspan font-size="3.175px" text-align="center" text-anchor="middle">TX OK</tspan></tspan></text>
+ <text x="145.31845" y="144.43121" xml:space="preserve"><tspan x="145.31845" y="144.43121" font-size="3.175px"><tspan font-size="3.175px" text-align="center" text-anchor="middle">Aborted</tspan></tspan></text>
+ <text x="145.40399" y="160.36035" xml:space="preserve"><tspan x="145.40399" y="160.36035" font-size="3.175px"><tspan font-size="3.175px" text-align="center" text-anchor="middle">TX failed</tspan></tspan></text>
+ <text x="91.823967" y="133.53941" text-align="center" text-anchor="middle" style="line-height:0.9" xml:space="preserve"><tspan x="91.823967" y="133.53941" text-align="center"><tspan font-size="3.175px" text-align="center" text-anchor="middle">TX in</tspan></tspan><tspan x="91.823967" y="136.39691" text-align="center">progress</tspan></text>
+ <text x="91.648918" y="151.84813" text-align="center" text-anchor="middle" style="line-height:0.9" xml:space="preserve"><tspan x="91.648918" y="151.84813" text-align="center"><tspan font-size="3.175px" text-align="center" text-anchor="middle">Abort in</tspan></tspan><tspan x="91.648918" y="154.70563" text-align="center">progress</tspan></text>
+ <text x="59.456043" y="143.91658" xml:space="preserve"><tspan x="59.456043" y="143.91658" font-size="3.175px"><tspan font-size="3.175px" text-align="center" text-anchor="middle">Empty</tspan></tspan></text>
+ </g>
+ <g fill="none">
+ <g stroke="#000">
+ <rect x="52.3943" y="171.63" width="106.581" height="16.601" rx="0" ry="0" stroke-linecap="square" stroke-width=".499999"/>
+ <g stroke-width=".6">
+ <path d="m106.383 159.046h26.4967" marker-end="url(#Arrow2Mend)"/>
+ <path d="m103.138 152.268h41.5564v-3.92426" marker-end="url(#marker1856)"/>
+ <path d="m106.38 129.354h17.7785"/>
+ <path d="m125.818 129.359h7.2418" marker-end="url(#marker1964)"/>
+ </g>
+ <path d="m124.169 129.354a0.959514 0.97091 0 0 1 0.47587-0.84557 0.959514 0.97091 0 0 1 0.96164-3e-3 0.959514 0.97091 0 0 1 0.48149 0.84231" stroke-linecap="square" stroke-width=".600001"/>
+ <path d="m55.7026 180.832h34.8131" marker-end="url(#marker2074)" stroke-width=".6"/>
+ </g>
+ <g>
+ <path d="m55.6464 185.744h34.8131" marker-end="url(#marker2074-3)" stroke="#28a4ff" stroke-width=".600001"/>
+ <g stroke-width=".6">
+ <path d="m94.0487 129.889v-10.6493" marker-end="url(#marker3373)" stroke="#000"/>
+ <path d="m89.7534 118.621v10.662" marker-end="url(#marker3517)" stroke="#000"/>
+ <path d="m92.119 138.812v7.9718" marker-end="url(#marker3667)" stroke="#28a4ff"/>
+ </g>
+ </g>
+ </g>
+ <text transform="matrix(.264583 0 0 .264583 91.8919 139.964)" x="26.959213" y="9.11724" fill="#2aa1ff" filter="url(#filter1204-6-2-9-1-3-1)" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle" style="line-height:1.1" xml:space="preserve"><tspan x="26.959213" y="9.11724" text-align="center">Set</tspan><tspan x="26.959213" y="22.31724" text-align="center">abort</tspan></text>
+ <text transform="translate(49.0277 104.823)" x="57.620724" y="16.855087" filter="url(#filter1204)" font-size="3.175px" text-align="center" text-anchor="middle" style="line-height:1.1" xml:space="preserve"><tspan x="57.620724" y="16.855087" text-align="center">Transmission</tspan><tspan x="57.620724" y="20.347588" text-align="center">unsuccesfull</tspan></text>
+ <g font-size="12px" stroke-width="3.77953" text-anchor="middle">
+ <text transform="matrix(.264583 0 0 .264583 68.5988 118.913)" x="38.824219" y="9.1171875" filter="url(#filter1204)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Transmission</tspan><tspan x="38.824219" y="22.317188" text-align="center">starts</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 106.802 130.509)" x="38.824219" y="9.1171875" filter="url(#filter1204)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Transmission</tspan><tspan x="38.824219" y="22.317188" text-align="center">succesfull</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 107.77 145.476)" x="38.824219" y="9.1171875" filter="url(#filter1204)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Transmission</tspan><tspan x="38.824219" y="22.317188" text-align="center">sborted</tspan></text>
+ </g>
+ <g stroke-width="3.77953" text-anchor="middle">
+ <text transform="matrix(.264583 0 0 .264583 107.574 155.948)" x="38.824219" y="9.1171875" filter="url(#filter1204)" font-size="10.6667px" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824219" y="9.1171875" text-align="center">Retransmit</tspan><tspan x="38.824219" y="20.850557" text-align="center">limit reached or</tspan><tspan x="38.824219" y="32.583927" text-align="center">node went bus off</tspan><tspan x="38.824219" y="44.317299" text-align="center"/></text>
+ <text transform="matrix(.264583 0 0 .264583 60.7127 177.384)" x="38.824539" y="9.1173134" filter="url(#filter1204-6)" font-size="12px" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="38.824539" y="9.1173134" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle">Transmission result</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 45.6885 173.226)" x="57.727047" y="9.11724" filter="url(#filter1204-6-9)" font-size="12px" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="57.727047" y="9.11724" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle">Legend:</tspan></text>
+ </g>
+ <g fill="#2aa1ff" font-size="12px" stroke-width="3.77953" text-anchor="middle">
+ <text transform="matrix(.264583 0 0 .264583 57.0045 182.079)" x="57.727047" y="9.11724" filter="url(#filter1204-6-2)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="57.727047" y="9.11724" fill="#2aa1ff" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle">SW command</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 57.7865 110.104)" x="40.822609" y="9.11724" filter="url(#filter1204-6-2-9)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="40.822609" y="9.11724" fill="#2aa1ff" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle">Set ready</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 116.893 107.491)" x="28.049065" y="9.1172523" filter="url(#filter1204-6-2-9-4)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="28.049065" y="9.1172523" fill="#2aa1ff" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle">Set ready</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 87.5687 166.324)" x="28.049065" y="9.1172523" filter="url(#filter1204-6-2-9-1)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="28.049065" y="9.1172523" fill="#2aa1ff" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle">Set empty</tspan></text>
+ <text transform="matrix(.264583 0 0 .264583 106.53 113.074)" x="30.228771" y="8.9063139" filter="url(#filter1204-6-2-9-1-3)" text-align="center" style="line-height:1.1" xml:space="preserve"><tspan x="30.228771" y="8.9063139" fill="#2aa1ff" font-size="12px" stroke-width="3.77953" text-align="center" text-anchor="middle">Set abort</tspan></text>
+ </g>
+ </g>
+</svg>
diff --git a/Documentation/networking/device_drivers/can/index.rst b/Documentation/networking/device_drivers/can/index.rst
index 58b6e0ad3030..0c3cc6633559 100644
--- a/Documentation/networking/device_drivers/can/index.rst
+++ b/Documentation/networking/device_drivers/can/index.rst
@@ -10,6 +10,7 @@ Contents:
.. toctree::
:maxdepth: 2
+ ctu/ctucanfd-driver
freescale/flexcan
.. only:: subproject and html
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 6b5dc203da2b..21a97703421d 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -39,6 +39,7 @@ Contents:
intel/iavf
intel/ice
marvell/octeontx2
+ marvell/octeon_ep
mellanox/mlx5
microsoft/netvsc
neterion/s2io
diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst
new file mode 100644
index 000000000000..bc562c49011b
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+====================================================================
+Linux kernel networking driver for Marvell's Octeon PCI Endpoint NIC
+====================================================================
+
+Network driver for Marvell's Octeon PCI EndPoint NIC.
+Copyright (c) 2020 Marvell International Ltd.
+
+Contents
+========
+
+- `Overview`_
+- `Supported Devices`_
+- `Interface Control`_
+
+Overview
+========
+This driver implements networking functionality of Marvell's Octeon PCI
+EndPoint NIC.
+
+Supported Devices
+=================
+Currently, this driver supports the following devices:
+ * Network controller: Cavium, Inc. Device b200
+
+Interface Control
+=================
+Network interface control operations, such as changing the MTU, link speed,
+or link up/down state, are performed by writing commands to a mailbox command
+queue, a mailbox interface implemented through a reserved region in BAR4.
+This driver writes the commands into the mailbox and the firmware on the
+Octeon device processes them. The firmware also sends unsolicited notifications
+to the driver for events such as link changes, through a notification queue
+implemented as part of the mailbox interface.
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index 5f5cfdb2a300..601eacaf12f3 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -17,7 +17,6 @@ Contents:
fddi/index
hamradio/index
qlogic/index
- wan/index
wifi/index
wwan/index
diff --git a/Documentation/networking/device_drivers/wan/index.rst b/Documentation/networking/device_drivers/wan/index.rst
deleted file mode 100644
index 9d9ae94f00b4..000000000000
--- a/Documentation/networking/device_drivers/wan/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
-Classic WAN Device Drivers
-==========================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- z8530book
-
-.. only:: subproject and html
-
- Indices
- =======
-
- * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/wan/z8530book.rst b/Documentation/networking/device_drivers/wan/z8530book.rst
deleted file mode 100644
index fea2c40e7973..000000000000
--- a/Documentation/networking/device_drivers/wan/z8530book.rst
+++ /dev/null
@@ -1,256 +0,0 @@
-=======================
-Z8530 Programming Guide
-=======================
-
-:Author: Alan Cox
-
-Introduction
-============
-
-The Z85x30 family synchronous/asynchronous controller chips are used on
-a large number of cheap network interface cards. The kernel provides a
-core interface layer that is designed to make it easy to provide WAN
-services using this chip.
-
-The current driver only support synchronous operation. Merging the
-asynchronous driver support into this code to allow any Z85x30 device to
-be used as both a tty interface and as a synchronous controller is a
-project for Linux post the 2.4 release
-
-Driver Modes
-============
-
-The Z85230 driver layer can drive Z8530, Z85C30 and Z85230 devices in
-three different modes. Each mode can be applied to an individual channel
-on the chip (each chip has two channels).
-
-The PIO synchronous mode supports the most common Z8530 wiring. Here the
-chip is interface to the I/O and interrupt facilities of the host
-machine but not to the DMA subsystem. When running PIO the Z8530 has
-extremely tight timing requirements. Doing high speeds, even with a
-Z85230 will be tricky. Typically you should expect to achieve at best
-9600 baud with a Z8C530 and 64Kbits with a Z85230.
-
-The DMA mode supports the chip when it is configured to use dual DMA
-channels on an ISA bus. The better cards tend to support this mode of
-operation for a single channel. With DMA running the Z85230 tops out
-when it starts to hit ISA DMA constraints at about 512Kbits. It is worth
-noting here that many PC machines hang or crash when the chip is driven
-fast enough to hold the ISA bus solid.
-
-Transmit DMA mode uses a single DMA channel. The DMA channel is used for
-transmission as the transmit FIFO is smaller than the receive FIFO. it
-gives better performance than pure PIO mode but is nowhere near as ideal
-as pure DMA mode.
-
-Using the Z85230 driver
-=======================
-
-The Z85230 driver provides the back end interface to your board. To
-configure a Z8530 interface you need to detect the board and to identify
-its ports and interrupt resources. It is also your problem to verify the
-resources are available.
-
-Having identified the chip you need to fill in a struct z8530_dev,
-which describes each chip. This object must exist until you finally
-shutdown the board. Firstly zero the active field. This ensures nothing
-goes off without you intending it. The irq field should be set to the
-interrupt number of the chip. (Each chip has a single interrupt source
-rather than each channel). You are responsible for allocating the
-interrupt line. The interrupt handler should be set to
-:c:func:`z8530_interrupt()`. The device id should be set to the
-z8530_dev structure pointer. Whether the interrupt can be shared or not
-is board dependent, and up to you to initialise.
-
-The structure holds two channel structures. Initialise chanA.ctrlio and
-chanA.dataio with the address of the control and data ports. You can or
-this with Z8530_PORT_SLEEP to indicate your interface needs the 5uS
-delay for chip settling done in software. The PORT_SLEEP option is
-architecture specific. Other flags may become available on future
-platforms, eg for MMIO. Initialise the chanA.irqs to &z8530_nop to
-start the chip up as disabled and discarding interrupt events. This
-ensures that stray interrupts will be mopped up and not hang the bus.
-Set chanA.dev to point to the device structure itself. The private and
-name field you may use as you wish. The private field is unused by the
-Z85230 layer. The name is used for error reporting and it may thus make
-sense to make it match the network name.
-
-Repeat the same operation with the B channel if your chip has both
-channels wired to something useful. This isn't always the case. If it is
-not wired then the I/O values do not matter, but you must initialise
-chanB.dev.
-
-If your board has DMA facilities then initialise the txdma and rxdma
-fields for the relevant channels. You must also allocate the ISA DMA
-channels and do any necessary board level initialisation to configure
-them. The low level driver will do the Z8530 and DMA controller
-programming but not board specific magic.
-
-Having initialised the device you can then call
-:c:func:`z8530_init()`. This will probe the chip and reset it into
-a known state. An identification sequence is then run to identify the
-chip type. If the checks fail to pass the function returns a non zero
-error code. Typically this indicates that the port given is not valid.
-After this call the type field of the z8530_dev structure is
-initialised to either Z8530, Z85C30 or Z85230 according to the chip
-found.
-
-Once you have called z8530_init you can also make use of the utility
-function :c:func:`z8530_describe()`. This provides a consistent
-reporting format for the Z8530 devices, and allows all the drivers to
-provide consistent reporting.
-
-Attaching Network Interfaces
-============================
-
-If you wish to use the network interface facilities of the driver, then
-you need to attach a network device to each channel that is present and
-in use. In addition to use the generic HDLC you need to follow some
-additional plumbing rules. They may seem complex but a look at the
-example hostess_sv11 driver should reassure you.
-
-The network device used for each channel should be pointed to by the
-netdevice field of each channel. The hdlc-> priv field of the network
-device points to your private data - you will need to be able to find
-your private data from this.
-
-The way most drivers approach this particular problem is to create a
-structure holding the Z8530 device definition and put that into the
-private field of the network device. The network device fields of the
-channels then point back to the network devices.
-
-If you wish to use the generic HDLC then you need to register the HDLC
-device.
-
-Before you register your network device you will also need to provide
-suitable handlers for most of the network device callbacks. See the
-network device documentation for more details on this.
-
-Configuring And Activating The Port
-===================================
-
-The Z85230 driver provides helper functions and tables to load the port
-registers on the Z8530 chips. When programming the register settings for
-a channel be aware that the documentation recommends initialisation
-orders. Strange things happen when these are not followed.
-
-:c:func:`z8530_channel_load()` takes an array of pairs of
-initialisation values in an array of u8 type. The first value is the
-Z8530 register number. Add 16 to indicate the alternate register bank on
-the later chips. The array is terminated by a 255.
-
-The driver provides a pair of public tables. The z8530_hdlc_kilostream
-table is for the UK 'Kilostream' service and also happens to cover most
-other end host configurations. The z8530_hdlc_kilostream_85230 table
-is the same configuration using the enhancements of the 85230 chip. The
-configuration loaded is standard NRZ encoded synchronous data with HDLC
-bitstuffing. All of the timing is taken from the other end of the link.
-
-When writing your own tables be aware that the driver internally tracks
-register values. It may need to reload values. You should therefore be
-sure to set registers 1-7, 9-11, 14 and 15 in all configurations. Where
-the register settings depend on DMA selection the driver will update the
-bits itself when you open or close. Loading a new table with the
-interface open is not recommended.
-
-There are three standard configurations supported by the core code. In
-PIO mode the interface is programmed up to use interrupt driven PIO.
-This places high demands on the host processor to avoid latency. The
-driver is written to take account of latency issues but it cannot avoid
-latencies caused by other drivers, notably IDE in PIO mode. Because the
-drivers allocate buffers you must also prevent MTU changes while the
-port is open.
-
-Once the port is open it will call the rx_function of each channel
-whenever a completed packet arrived. This is invoked from interrupt
-context and passes you the channel and a network buffer (struct
-sk_buff) holding the data. The data includes the CRC bytes so most
-users will want to trim the last two bytes before processing the data.
-This function is very timing critical. When you wish to simply discard
-data the support code provides the function
-:c:func:`z8530_null_rx()` to discard the data.
-
-To active PIO mode sending and receiving the ``z8530_sync_open`` is called.
-This expects to be passed the network device and the channel. Typically
-this is called from your network device open callback. On a failure a
-non zero error status is returned.
-The :c:func:`z8530_sync_close()` function shuts down a PIO
-channel. This must be done before the channel is opened again and before
-the driver shuts down and unloads.
-
-The ideal mode of operation is dual channel DMA mode. Here the kernel
-driver will configure the board for DMA in both directions. The driver
-also handles ISA DMA issues such as controller programming and the
-memory range limit for you. This mode is activated by calling the
-:c:func:`z8530_sync_dma_open()` function. On failure a non zero
-error value is returned. Once this mode is activated it can be shut down
-by calling the :c:func:`z8530_sync_dma_close()`. You must call
-the close function matching the open mode you used.
-
-The final supported mode uses a single DMA channel to drive the transmit
-side. As the Z85C30 has a larger FIFO on the receive channel this tends
-to increase the maximum speed a little. This is activated by calling the
-``z8530_sync_txdma_open``. This returns a non zero error code on failure. The
-:c:func:`z8530_sync_txdma_close()` function closes down the Z8530
-interface from this mode.
-
-Network Layer Functions
-=======================
-
-The Z8530 layer provides functions to queue packets for transmission.
-The driver internally buffers the frame currently being transmitted and
-one further frame (in order to keep back to back transmission running).
-Any further buffering is up to the caller.
-
-The function :c:func:`z8530_queue_xmit()` takes a network buffer
-in sk_buff format and queues it for transmission. The caller must
-provide the entire packet with the exception of the bitstuffing and CRC.
-This is normally done by the caller via the generic HDLC interface
-layer. It returns 0 if the buffer has been queued and non zero values
-for queue full. If the function accepts the buffer it becomes property
-of the Z8530 layer and the caller should not free it.
-
-The function :c:func:`z8530_get_stats()` returns a pointer to an
-internally maintained per interface statistics block. This provides most
-of the interface code needed to implement the network layer get_stats
-callback.
-
-Porting The Z8530 Driver
-========================
-
-The Z8530 driver is written to be portable. In DMA mode it makes
-assumptions about the use of ISA DMA. These are probably warranted in
-most cases as the Z85230 in particular was designed to glue to PC type
-machines. The PIO mode makes no real assumptions.
-
-Should you need to retarget the Z8530 driver to another architecture the
-only code that should need changing are the port I/O functions. At the
-moment these assume PC I/O port accesses. This may not be appropriate
-for all platforms. Replacing :c:func:`z8530_read_port()` and
-``z8530_write_port`` is intended to be all that is required to port
-this driver layer.
-
-Known Bugs And Assumptions
-==========================
-
-Interrupt Locking
- The locking in the driver is done via the global cli/sti lock. This
- makes for relatively poor SMP performance. Switching this to use a
- per device spin lock would probably materially improve performance.
-
-Occasional Failures
- We have reports of occasional failures when run for very long
- periods of time and the driver starts to receive junk frames. At the
- moment the cause of this is not clear.
-
-Public Functions Provided
-=========================
-
-.. kernel-doc:: drivers/net/wan/z85230.c
- :export:
-
-Internal Functions
-==================
-
-.. kernel-doc:: drivers/net/wan/z85230.c
- :internal:
diff --git a/Documentation/networking/devlink/devlink-linecard.rst b/Documentation/networking/devlink/devlink-linecard.rst
new file mode 100644
index 000000000000..a98b468ad479
--- /dev/null
+++ b/Documentation/networking/devlink/devlink-linecard.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Devlink Line card
+=================
+
+Background
+==========
+
+The ``devlink-linecard`` mechanism is targeted at manipulation of
+line cards that serve as detachable PHY modules for a modular switch
+system. The following operations are provided:
+
+ * Get a list of supported line card types.
+ * Provision a slot with a specific line card type.
+ * Get and monitor the line card state and its changes.
+ * Get information about line card versions and devices.
+
+Depending on its type, a line card may contain one or more gearboxes
+to mux lanes of a certain speed to multiple ports with lanes
+of a different speed. The line card ensures an N:M mapping between
+the switch ASIC modules and the physical front panel ports.
+
+Overview
+========
+
+Each line card devlink object is created by the device driver,
+according to the physical line card slots available on the device.
+
+Similar to a splitter cable, where the device might have no way
+of detecting the splitter cable geometry, the device
+might not have a way to detect the line card type. For such devices,
+the concept of provisioning is introduced. It allows the user to:
+
+ * Provision a line card slot with a certain line card type
+
+   - The device driver would instruct the ASIC to prepare all
+     resources accordingly. The device driver would
+     create all instances, namely devlink ports and netdevices
+     that reside on the line card, according to the line card type
+ * Manipulate line card entities even without the line card
+   being physically connected or powered up
+ * Set up a splitter cable on line card ports
+
+   - As on ordinary ports, the user may provision a splitter
+     cable of a certain type, without the need for it to
+     be physically connected to the port
+ * Configure devlink ports and netdevices
+
+Netdevice carrier is decided as follows:
+
+ * Line card is not inserted or powered-down
+
+ - The carrier is always down
+ * Line card is inserted and powered up
+
+ - The carrier is decided as for ordinary port netdevice
+
+Line card state
+===============
+
+The ``devlink-linecard`` mechanism supports the following line card states:
+
+ * ``unprovisioned``: Line card is not provisioned on the slot.
+ * ``unprovisioning``: Line card slot is currently being unprovisioned.
+ * ``provisioning``: Line card slot is currently in a process of being provisioned
+ with a line card type.
+ * ``provisioning_failed``: Provisioning was not successful.
+ * ``provisioned``: Line card slot is provisioned with a type.
+ * ``active``: Line card is powered-up and active.
+
+The following diagram provides a general overview of ``devlink-linecard``
+state transitions::
+
+ +-------------------------+
+ | |
+ +----------------------------------> unprovisioned |
+ | | |
+ | +--------|-------^--------+
+ | | |
+ | | |
+ | +--------v-------|--------+
+ | | |
+ | | provisioning |
+ | | |
+ | +------------|------------+
+ | |
+ | +-----------------------------+
+ | | |
+ | +------------v------------+ +------------v------------+ +-------------------------+
+ | | | | ----> |
+ +----- provisioning_failed | | provisioned | | active |
+ | | | | <---- |
+ | +------------^------------+ +------------|------------+ +-------------------------+
+ | | |
+ | | |
+ | | +------------v------------+
+ | | | |
+ | | | unprovisioning |
+ | | | |
+ | | +------------|------------+
+ | | |
+ | +-----------------------------+
+ | |
+ +-----------------------------------------------+
+
+
+Example usage
+=============
+
+.. code:: shell
+
+ $ devlink lc show [ DEV [ lc LC_INDEX ] ]
+ $ devlink lc set DEV lc LC_INDEX [ { type LC_TYPE | notype } ]
+
+ # Show current line card configuration and status for all slots:
+ $ devlink lc
+
+ # Set slot 8 to be provisioned with type "16x100G":
+ $ devlink lc set pci/0000:01:00.0 lc 8 type 16x100G
+
+ # Set slot 8 to be unprovisioned:
+ $ devlink lc set pci/0000:01:00.0 lc 8 notype
+
+ # Set info for slot 8:
+ $ devlink lc info pci/0000:01:00.0 lc 8
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index c17cdb079611..850715512293 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -39,6 +39,7 @@ general.
devlink-resource
devlink-reload
devlink-trap
+ devlink-linecard
Driver-specific documentation
-----------------------------
diff --git a/Documentation/networking/devlink/mlxsw.rst b/Documentation/networking/devlink/mlxsw.rst
index cf857cb4ba8f..0af345680510 100644
--- a/Documentation/networking/devlink/mlxsw.rst
+++ b/Documentation/networking/devlink/mlxsw.rst
@@ -58,6 +58,39 @@ The ``mlxsw`` driver reports the following versions
- running
- Three digit firmware version
+Line card info versions
+=======================
+
+The ``mlxsw`` driver reports the following versions for line cards
+
+.. list-table:: devlink line card info versions implemented
+ :widths: 5 5 90
+
+ * - Name
+ - Type
+ - Description
+ * - ``hw.revision``
+ - fixed
+ - The hardware revision for this line card
+ * - ``ini.version``
+ - running
+ - Version of line card INI loaded
+
+Line card device info versions
+==============================
+
+The ``mlxsw`` driver reports the following versions for line card devices
+
+.. list-table:: devlink line card device info versions implemented
+ :widths: 5 5 90
+
+ * - Name
+ - Type
+ - Description
+ * - ``fw.version``
+ - running
+ - Three digit firmware version
+
Driver-specific Traps
=====================
diff --git a/Documentation/networking/dsa/dsa.rst b/Documentation/networking/dsa/dsa.rst
index ddc1dd039337..ed7fa76e7a40 100644
--- a/Documentation/networking/dsa/dsa.rst
+++ b/Documentation/networking/dsa/dsa.rst
@@ -193,6 +193,23 @@ protocol. If not all packets are of equal size, the tagger can implement the
default behavior by specifying the correct offset incurred by each individual
RX packet. Tail taggers do not cause issues to the flow dissector.
+Checksum offload should work with category 1 and 2 taggers when the DSA master
+driver declares NETIF_F_HW_CSUM in vlan_features and looks at csum_start and
+csum_offset. For those cases, DSA will shift the checksum start and offset by
+the tag size. If the DSA master driver still uses the legacy NETIF_F_IP_CSUM
+or NETIF_F_IPV6_CSUM in vlan_features, the offload might only work if the
+offload hardware already expects that specific tag (perhaps due to matching
+vendors). DSA slaves inherit those flags from the master port, and it is up to
+the driver to correctly fall back to software checksum when the IP header is not
+where the hardware expects. If that check is ineffective, the packets might go
+to the network without a proper checksum (the checksum field will have the
+pseudo IP header sum). For category 3, when the offload hardware does not
+already expect the switch tag in use, the checksum must be calculated before any
+tag is inserted (i.e. inside the tagger). Otherwise, the DSA master would
+include the tail tag in the (software or hardware) checksum calculation. Then,
+when the tag gets stripped by the switch during transmission, it will leave an
+incorrect IP checksum in place.
+
Due to various reasons (most common being category 1 taggers being associated
with DSA-unaware masters, mangling what the master perceives as MAC DA), the
tagging protocol may require the DSA master to operate in promiscuous mode, to
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 24d9be69065d..dbca3e9ec782 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -862,6 +862,7 @@ Kernel response contents:
``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
+ ``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
==================================== ====== ===========================
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -871,6 +872,12 @@ separate buffers. The device configuration must make it possible to receive
full memory pages of data, for example because MTU is high enough or through
HW-GRO.
+``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable the descriptor fast
+path for sending packets. In the ordinary path, the driver fills descriptors
+in DRAM and notifies the NIC hardware. In the fast path, the driver pushes
+descriptors to the device through MMIO writes, thus reducing the latency.
+However, enabling this feature may increase the CPU cost. Drivers may enforce
+additional per-packet eligibility checks (e.g. on packet size).
RINGS_SET
=========
@@ -887,6 +894,7 @@ Request contents:
``ETHTOOL_A_RINGS_TX`` u32 size of TX ring
``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
+ ``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
==================================== ====== ===========================
Kernel checks that requested ring sizes do not exceed limits reported by
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 66828293d9cb..b882d4238581 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2474,6 +2474,33 @@ drop_unsolicited_na - BOOLEAN
By default this is turned off.
+accept_unsolicited_na - BOOLEAN
+ Add a new neighbour cache entry in STALE state for routers on receiving an
+ unsolicited neighbour advertisement with target link-layer address option
+ specified. This is as per router-side behavior documented in RFC9131.
+ This has lower precedence than drop_unsolicited_na.
+
+ ==== ====== ====== ==============================================
+ drop accept fwding behaviour
+ ---- ------ ------ ----------------------------------------------
+ 1 X X Drop NA packet and don't pass up the stack
+ 0 0 X Pass NA packet up the stack, don't update NC
+ 0 1 0 Pass NA packet up the stack, don't update NC
+ 0 1 1 Pass NA packet up the stack, and add a STALE
+ NC entry
+ ==== ====== ====== ==============================================
+
+ This optimizes the return path for the initial off-link communication
+ initiated by a directly connected host, by ensuring that a first-hop
+ router which turns on this setting does not have to buffer the initial
+ return packets while it performs neighbour solicitation.
+ The prerequisite is that the host is configured to send unsolicited
+ neighbour advertisements on interface bringup; using this setting in
+ conjunction with the ndisc_notify setting on the host satisfies that
+ prerequisite.
+
+ By default this is turned off.
+
enhanced_dad - BOOLEAN
Include a nonce option in the IPv6 neighbor solicitation messages used for
duplicate address detection per RFC7527. A received DAD NS will only signal
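For completeness, a tiny userspace sketch of flipping the new knob on one
interface; the interface name "eth0" is only an example, and the usual
``sysctl`` utility achieves the same thing.

#include <stdio.h>

int main(void)
{
	/* Equivalent to: sysctl -w net.ipv6.conf.eth0.accept_unsolicited_na=1
	 * ("eth0" is just an example interface name). */
	FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/accept_unsolicited_na",
			"w");

	if (!f) {
		perror("accept_unsolicited_na");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}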
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index b0d4da71e68e..e263dfcc4b40 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -46,6 +46,24 @@ allow_join_initial_addr_port - BOOLEAN
Default: 1
+pm_type - INTEGER
+
+ Set the default path manager type to use for each new MPTCP
+ socket. In-kernel path management will control subflow
+ connections and address advertisements according to
+ per-namespace values configured over the MPTCP netlink
+ API. Userspace path management puts per-MPTCP-connection subflow
+ connection decisions and address advertisements under control of
+ a privileged userspace program, at the cost of more netlink
+ traffic to propagate all of the related events and commands.
+
+ This is a per-namespace sysctl.
+
+ * 0 - In-kernel path manager
+ * 1 - Userspace path manager
+
+ Default: 0
+
stale_loss_cnt - INTEGER
The number of MPTCP-level retransmission intervals with no traffic and
pending outstanding data on a given subflow required to declare it stale.
diff --git a/MAINTAINERS b/MAINTAINERS
index edc96cdb85e8..cbf21e11deb7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5050,12 +5050,6 @@ S: Maintained
F: Documentation/hwmon/corsair-psu.rst
F: drivers/hwmon/corsair-psu.c
-COSA/SRP SYNC SERIAL DRIVER
-M: Jan "Yenya" Kasprzak <kas@fi.muni.cz>
-S: Maintained
-W: http://www.fi.muni.cz/~kas/cosa/
-F: drivers/net/wan/cosa*
-
COUNTER SUBSYSTEM
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
@@ -5237,6 +5231,14 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
F: drivers/media/platform/sunxi/sun6i-csi/
+CTU CAN FD DRIVER
+M: Pavel Pisa <pisa@cmp.felk.cvut.cz>
+M: Ondrej Ille <ondrej.ille@gmail.com>
+L: linux-can@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
+F: drivers/net/can/ctucanfd/
+
CW1200 WLAN driver
M: Solomon Peachy <pizza@shaftnet.org>
S: Maintained
@@ -8767,7 +8769,6 @@ F: kernel/time/timer_*.c
HIGH-SPEED SCC DRIVER FOR AX.25
L: linux-hams@vger.kernel.org
S: Orphan
-F: drivers/net/hamradio/dmascc.c
F: drivers/net/hamradio/scc.c
HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
@@ -11831,6 +11832,13 @@ S: Supported
F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
F: drivers/mmc/host/sdhci-xenon*
+MARVELL OCTEON ENDPOINT DRIVER
+M: Veerasenareddy Burru <vburru@marvell.com>
+M: Abhijit Ayarekar <aayarekar@marvell.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/marvell/octeon_ep
+
MATROX FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
S: Orphan
@@ -12909,6 +12917,13 @@ F: drivers/net/dsa/microchip/*
F: include/linux/platform_data/microchip-ksz.h
F: net/dsa/tag_ksz.c
+MICROCHIP LAN87xx/LAN937x T1 PHY DRIVER
+M: Arun Ramadoss <arun.ramadoss@microchip.com>
+R: UNGLinuxDriver@microchip.com
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/microchip_t1.c
+
MICROCHIP LAN743X ETHERNET DRIVER
M: Bryan Whitehead <bryan.whitehead@microchip.com>
M: UNGLinuxDriver@microchip.com
@@ -15979,6 +15994,12 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/admin-guide/media/pulse8-cec.rst
F: drivers/media/cec/usb/pulse8/
+PURELIFI PLFXLC DRIVER
+M: Srinivasan Raju <srini.raju@purelifi.com>
+L: linux-wireless@vger.kernel.org
+S: Supported
+F: drivers/net/wireless/purelifi/plfxlc/
+
PVRUSB2 VIDEO4LINUX DRIVER
M: Mike Isely <isely@pobox.com>
L: pvrusb2@isely.net (subscribers-only)
@@ -17989,8 +18010,8 @@ F: drivers/platform/x86/touchscreen_dmi.c
SILICON LABS WIRELESS DRIVERS (for WFxxx series)
M: Jérôme Pouiller <jerome.pouiller@silabs.com>
S: Supported
-F: Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml
-F: drivers/staging/wfx/
+F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
+F: drivers/net/wireless/silabs/wfx/
SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -21643,7 +21664,7 @@ M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
L: linux-can@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/can/xilinx_can.txt
+F: Documentation/devicetree/bindings/net/can/xilinx,can.yaml
F: drivers/net/can/xilinx_can.c
XILINX GPIO DRIVER
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 7d81535893af..739891b94136 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -135,6 +135,8 @@
#define SO_TXREHASH 74
+#define SO_RCVMARK 75
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 3d5ce9da42c3..6aa1fd5c9359 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -181,6 +181,7 @@
status = "disabled";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mdio1_default>;
+ resets = <&syscon ASPEED_RESET_MII>;
};
mdio1: mdio@1e650008 {
@@ -191,6 +192,7 @@
status = "disabled";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mdio2_default>;
+ resets = <&syscon ASPEED_RESET_MII>;
};
mdio2: mdio@1e650010 {
@@ -201,6 +203,7 @@
status = "disabled";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mdio3_default>;
+ resets = <&syscon ASPEED_RESET_MII>;
};
mdio3: mdio@1e650018 {
@@ -211,6 +214,7 @@
status = "disabled";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mdio4_default>;
+ resets = <&syscon ASPEED_RESET_MII>;
};
mac0: ftgmac@1e660000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 6f8cb3ad1e84..f232f8baf4e8 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -357,7 +357,7 @@
};
cci_control2: slave-if@5000 {
- compatible = "arm,cci-400-ctrl-if";
+ compatible = "arm,cci-400-ctrl-if", "syscon";
interface-type = "ace";
reg = <0x5000 0x1000>;
};
@@ -901,6 +901,11 @@
};
};
+ hifsys: syscon@1af00000 {
+ compatible = "mediatek,mt7622-hifsys", "syscon";
+ reg = <0 0x1af00000 0 0x70>;
+ };
+
ethsys: syscon@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
@@ -919,6 +924,26 @@
#dma-cells = <1>;
};
+ pcie_mirror: pcie-mirror@10000400 {
+ compatible = "mediatek,mt7622-pcie-mirror",
+ "syscon";
+ reg = <0 0x10000400 0 0x10>;
+ };
+
+ wed0: wed@1020a000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020a000 0 0x1000>;
+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ wed1: wed@1020b000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020b000 0 0x1000>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
+ };
+
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth",
"mediatek,mt2701-eth",
@@ -945,6 +970,11 @@
power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys>;
+ cci-control-port = <&cci_control2>;
+ mediatek,wed = <&wed0>, <&wed1>;
+ mediatek,pcie-mirror = <&pcie_mirror>;
+ mediatek,hifsys = <&hifsys>;
+ dma-coherent;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 1e5760d567ae..6aa2dc836db1 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -201,6 +201,8 @@ enum aarch64_insn_size_type {
enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_LOAD_REG_OFFSET,
AARCH64_INSN_LDST_STORE_REG_OFFSET,
+ AARCH64_INSN_LDST_LOAD_IMM_OFFSET,
+ AARCH64_INSN_LDST_STORE_IMM_OFFSET,
AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
@@ -335,6 +337,7 @@ __AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(str_imm, 0x3FC00000, 0x39000000)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldclr, 0x3F20FC00, 0x38201000)
__AARCH64_INSN_FUNCS(ldeor, 0x3F20FC00, 0x38202000)
@@ -342,6 +345,7 @@ __AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000)
__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000)
__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(ldr_imm, 0x3FC00000, 0x39400000)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
@@ -501,6 +505,11 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
enum aarch64_insn_register offset,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ unsigned int imm,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_register base,
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index 5e90887deec4..695d7368fadc 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -299,29 +299,24 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
return insn;
}
+static const u32 aarch64_insn_ldst_size[] = {
+ [AARCH64_INSN_SIZE_8] = 0,
+ [AARCH64_INSN_SIZE_16] = 1,
+ [AARCH64_INSN_SIZE_32] = 2,
+ [AARCH64_INSN_SIZE_64] = 3,
+};
+
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
u32 insn)
{
u32 size;
- switch (type) {
- case AARCH64_INSN_SIZE_8:
- size = 0;
- break;
- case AARCH64_INSN_SIZE_16:
- size = 1;
- break;
- case AARCH64_INSN_SIZE_32:
- size = 2;
- break;
- case AARCH64_INSN_SIZE_64:
- size = 3;
- break;
- default:
+ if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
pr_err("%s: unknown size encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
+ size = aarch64_insn_ldst_size[type];
insn &= ~GENMASK(31, 30);
insn |= size << 30;
@@ -504,6 +499,50 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
offset);
}
+u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ unsigned int imm,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+ u32 shift;
+
+ if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
+ pr_err("%s: unknown size encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ shift = aarch64_insn_ldst_size[size];
+ if (imm & ~(BIT(12 + shift) - BIT(shift))) {
+ pr_err("%s: invalid imm: %d\n", __func__, imm);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ imm >>= shift;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
+ insn = aarch64_insn_get_ldr_imm_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
+ insn = aarch64_insn_get_str_imm_value();
+ break;
+ default:
+ pr_err("%s: unknown load/store encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
+}
+
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_register base,
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index dd59b5ad8fe4..194c95ccc1cf 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -66,6 +66,20 @@
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
+/* Load/store register (immediate offset) */
+#define A64_LS_IMM(Rt, Rn, imm, size, type) \
+ aarch64_insn_gen_load_store_imm(Rt, Rn, imm, \
+ AARCH64_INSN_SIZE_##size, \
+ AARCH64_INSN_LDST_##type##_IMM_OFFSET)
+#define A64_STRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, STORE)
+#define A64_LDRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
+#define A64_STRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, STORE)
+#define A64_LDRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
+#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
+#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
+#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
+#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)
+
/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
@@ -249,6 +263,9 @@
/* HINTs */
#define A64_HINT(x) aarch64_insn_gen_hint(x)
+#define A64_PACIASP A64_HINT(AARCH64_INSN_HINT_PACIASP)
+#define A64_AUTIASP A64_HINT(AARCH64_INSN_HINT_AUTIASP)
+
/* BTI */
#define A64_BTI_C A64_HINT(AARCH64_INSN_HINT_BTIC)
#define A64_BTI_J A64_HINT(AARCH64_INSN_HINT_BTIJ)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index fcc675aa1670..8ab4035dea27 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -26,6 +26,7 @@
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -63,6 +64,7 @@ static const int bpf2a64[] = {
[TCALL_CNT] = A64_R(26),
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
+ [FP_BOTTOM] = A64_R(27),
};
struct jit_ctx {
@@ -73,6 +75,7 @@ struct jit_ctx {
int exentry_idx;
__le32 *image;
u32 stack_size;
+ int fpb_offset;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@ -191,11 +194,53 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
+/*
+ * There are 3 types of AArch64 LDR/STR (immediate) instruction:
+ * Post-index, Pre-index, Unsigned offset.
+ *
+ * For BPF ldr/str, the "unsigned offset" type is sufficient.
+ *
+ * "Unsigned offset" type LDR(immediate) format:
+ *
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |x x|1 1 1 0 0 1 0 1| imm12 | Rn | Rt |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * scale
+ *
+ * "Unsigned offset" type STR(immediate) format:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |x x|1 1 1 0 0 1 0 0| imm12 | Rn | Rt |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * scale
+ *
+ * The offset is calculated from imm12 and scale in the following way:
+ *
+ * offset = (u64)imm12 << scale
+ */
+static bool is_lsi_offset(int offset, int scale)
+{
+ if (offset < 0)
+ return false;
+
+ if (offset > (0xFFF << scale))
+ return false;
+
+ if (offset & ((1 << scale) - 1))
+ return false;
+
+ return true;
+}
+
/* Tail call offset to jump into */
-#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
-#define PROLOGUE_OFFSET 8
+#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) || \
+ IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)
+#define PROLOGUE_OFFSET 9
#else
-#define PROLOGUE_OFFSET 7
+#define PROLOGUE_OFFSET 8
#endif
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
@@ -207,6 +252,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tcc = bpf2a64[TCALL_CNT];
+ const u8 fpb = bpf2a64[FP_BOTTOM];
const int idx0 = ctx->idx;
int cur_offset;
@@ -233,8 +279,11 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
*
*/
+ /* Sign lr */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ emit(A64_PACIASP, ctx);
/* BTI landing pad */
- if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
+ else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
emit(A64_BTI_C, ctx);
/* Save FP and LR registers to stay align with ARM64 AAPCS */
@@ -245,6 +294,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_PUSH(r6, r7, A64_SP), ctx);
emit(A64_PUSH(r8, r9, A64_SP), ctx);
emit(A64_PUSH(fp, tcc, A64_SP), ctx);
+ emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
/* Set up BPF prog stack base register */
emit(A64_MOV(1, fp, A64_SP), ctx);
@@ -265,6 +315,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_BTI_J, ctx);
}
+ emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
+
/* Stack must be multiples of 16B */
ctx->stack_size = round_up(prog->aux->stack_depth, 16);
@@ -512,10 +564,13 @@ static void build_epilogue(struct jit_ctx *ctx)
const u8 r8 = bpf2a64[BPF_REG_8];
const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 fpb = bpf2a64[FP_BOTTOM];
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ /* Restore x27 and x28 */
+ emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
/* Restore fs (x25) and x26 */
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -529,6 +584,10 @@ static void build_epilogue(struct jit_ctx *ctx)
/* Set return value */
emit(A64_MOV(1, A64_R(0), r0), ctx);
+ /* Authenticate lr */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ emit(A64_AUTIASP, ctx);
+
emit(A64_RET(A64_LR), ctx);
}
@@ -609,6 +668,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 fpb = bpf2a64[FP_BOTTOM];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -617,6 +678,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
u8 jmp_cond;
s32 jmp_offset;
u32 a64_insn;
+ u8 src_adj;
+ u8 dst_adj;
+ int off_adj;
int ret;
switch (code) {
@@ -971,19 +1035,45 @@ emit_cond_jmp:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
- emit_a64_mov_i(1, tmp, off, ctx);
+ if (ctx->fpb_offset > 0 && src == fp) {
+ src_adj = fpb;
+ off_adj = off + ctx->fpb_offset;
+ } else {
+ src_adj = src;
+ off_adj = off;
+ }
switch (BPF_SIZE(code)) {
case BPF_W:
- emit(A64_LDR32(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 2)) {
+ emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDR32(dst, src, tmp), ctx);
+ }
break;
case BPF_H:
- emit(A64_LDRH(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 1)) {
+ emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDRH(dst, src, tmp), ctx);
+ }
break;
case BPF_B:
- emit(A64_LDRB(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 0)) {
+ emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDRB(dst, src, tmp), ctx);
+ }
break;
case BPF_DW:
- emit(A64_LDR64(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 3)) {
+ emit(A64_LDR64I(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDR64(dst, src, tmp), ctx);
+ }
break;
}
@@ -1010,21 +1100,47 @@ emit_cond_jmp:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
+ if (ctx->fpb_offset > 0 && dst == fp) {
+ dst_adj = fpb;
+ off_adj = off + ctx->fpb_offset;
+ } else {
+ dst_adj = dst;
+ off_adj = off;
+ }
/* Load imm to a register then store it */
- emit_a64_mov_i(1, tmp2, off, ctx);
emit_a64_mov_i(1, tmp, imm, ctx);
switch (BPF_SIZE(code)) {
case BPF_W:
- emit(A64_STR32(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 2)) {
+ emit(A64_STR32I(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STR32(tmp, dst, tmp2), ctx);
+ }
break;
case BPF_H:
- emit(A64_STRH(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 1)) {
+ emit(A64_STRHI(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STRH(tmp, dst, tmp2), ctx);
+ }
break;
case BPF_B:
- emit(A64_STRB(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 0)) {
+ emit(A64_STRBI(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STRB(tmp, dst, tmp2), ctx);
+ }
break;
case BPF_DW:
- emit(A64_STR64(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 3)) {
+ emit(A64_STR64I(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STR64(tmp, dst, tmp2), ctx);
+ }
break;
}
break;
@@ -1034,19 +1150,45 @@ emit_cond_jmp:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
- emit_a64_mov_i(1, tmp, off, ctx);
+ if (ctx->fpb_offset > 0 && dst == fp) {
+ dst_adj = fpb;
+ off_adj = off + ctx->fpb_offset;
+ } else {
+ dst_adj = dst;
+ off_adj = off;
+ }
switch (BPF_SIZE(code)) {
case BPF_W:
- emit(A64_STR32(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 2)) {
+ emit(A64_STR32I(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STR32(src, dst, tmp), ctx);
+ }
break;
case BPF_H:
- emit(A64_STRH(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 1)) {
+ emit(A64_STRHI(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STRH(src, dst, tmp), ctx);
+ }
break;
case BPF_B:
- emit(A64_STRB(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 0)) {
+ emit(A64_STRBI(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STRB(src, dst, tmp), ctx);
+ }
break;
case BPF_DW:
- emit(A64_STR64(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 3)) {
+ emit(A64_STR64I(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STR64(src, dst, tmp), ctx);
+ }
break;
}
break;
@@ -1069,6 +1211,79 @@ emit_cond_jmp:
return 0;
}
+/*
+ * Return 0 if FP may change at runtime, otherwise find the minimum negative
+ * offset to FP, convert it to a positive number, and align it down to 8 bytes.
+ */
+static int find_fpb_offset(struct bpf_prog *prog)
+{
+ int i;
+ int offset = 0;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ const u8 class = BPF_CLASS(insn->code);
+ const u8 mode = BPF_MODE(insn->code);
+ const u8 src = insn->src_reg;
+ const u8 dst = insn->dst_reg;
+ const s32 imm = insn->imm;
+ const s16 off = insn->off;
+
+ switch (class) {
+ case BPF_STX:
+ case BPF_ST:
+ /* fp holds atomic operation result */
+ if (class == BPF_STX && mode == BPF_ATOMIC &&
+ ((imm == BPF_XCHG ||
+ imm == (BPF_FETCH | BPF_ADD) ||
+ imm == (BPF_FETCH | BPF_AND) ||
+ imm == (BPF_FETCH | BPF_XOR) ||
+ imm == (BPF_FETCH | BPF_OR)) &&
+ src == BPF_REG_FP))
+ return 0;
+
+ if (mode == BPF_MEM && dst == BPF_REG_FP &&
+ off < offset)
+ offset = insn->off;
+ break;
+
+ case BPF_JMP32:
+ case BPF_JMP:
+ break;
+
+ case BPF_LDX:
+ case BPF_LD:
+ /* fp holds load result */
+ if (dst == BPF_REG_FP)
+ return 0;
+
+ if (class == BPF_LDX && mode == BPF_MEM &&
+ src == BPF_REG_FP && off < offset)
+ offset = off;
+ break;
+
+ case BPF_ALU:
+ case BPF_ALU64:
+ default:
+ /* fp holds ALU result */
+ if (dst == BPF_REG_FP)
+ return 0;
+ }
+ }
+
+ if (offset < 0) {
+ /*
+ * -offset can safely be converted to a positive 'int',
+ * since insn->off is 's16'
+ */
+ offset = -offset;
+ /* align down to 8 bytes */
+ offset = ALIGN_DOWN(offset, 8);
+ }
+
+ return offset;
+}
+
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
const struct bpf_prog *prog = ctx->prog;
@@ -1190,6 +1405,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
+ ctx.fpb_offset = find_fpb_offset(prog);
+
/*
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
*
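The effect of the new immediate-offset path can be checked outside the kernel.
The sketch below is a standalone userspace restatement of the is_lsi_offset()
rule, not the JIT code itself: an offset qualifies for the LDR/STR "unsigned
offset" form when it is non-negative, aligned to the access size, and no larger
than 0xFFF << scale. Negative BPF stack offsets relative to FP fail the first
test, which is exactly what the new FP_BOTTOM (x27) base plus fpb_offset
rebasing is for: after rebasing, the same accesses land on small positive
offsets and encode as single instructions.

#include <stdbool.h>
#include <stdio.h>

/* Userspace restatement of the kernel's is_lsi_offset() check. */
static bool fits_lsi(int offset, int scale)
{
	if (offset < 0)
		return false;
	if (offset > (0xFFF << scale))
		return false;
	if (offset & ((1 << scale) - 1))
		return false;
	return true;
}

int main(void)
{
	/* 8-byte access (scale 3) at offset 24: encodes as imm12 = 3. */
	printf("24  -> %d (imm12=%d)\n", fits_lsi(24, 3), 24 >> 3);
	/* -16 does not fit; rebased against FP_BOTTOM it becomes positive. */
	printf("-16 -> %d\n", fits_lsi(-16, 3));
	/* 20 is not 8-byte aligned, so the register-offset form is kept. */
	printf("20  -> %d\n", fits_lsi(20, 3));
	return 0;
}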
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 5cb91509bb7c..d82f4ebf687f 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -178,12 +178,8 @@ CONFIG_NETCONSOLE=m
CONFIG_ATM_TCP=m
CONFIG_ATM_LANAI=m
CONFIG_ATM_ENI=m
-CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
CONFIG_ATM_NICSTAR=m
CONFIG_ATM_IDT77252=m
-CONFIG_ATM_AMBASSADOR=m
-CONFIG_ATM_HORIZON=m
CONFIG_ATM_IA=m
CONFIG_ATM_FORE200E=m
CONFIG_ATM_HE=m
@@ -214,7 +210,6 @@ CONFIG_ATH_DEBUG=y
CONFIG_ATH5K=y
CONFIG_ATH5K_DEBUG=y
CONFIG_WAN=y
-CONFIG_LANMEDIA=m
CONFIG_HDLC=m
CONFIG_HDLC_RAW=m
CONFIG_HDLC_RAW_ETH=m
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 205d3b34528c..0cb4d9aa14d1 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -255,12 +255,8 @@ CONFIG_ARCNET_COM20020_CS=m
CONFIG_ATM_TCP=m
CONFIG_ATM_LANAI=m
CONFIG_ATM_ENI=m
-CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
CONFIG_ATM_NICSTAR=m
CONFIG_ATM_IDT77252=m
-CONFIG_ATM_AMBASSADOR=m
-CONFIG_ATM_HORIZON=m
CONFIG_ATM_IA=m
CONFIG_ATM_FORE200E=m
CONFIG_ATM_HE=m
@@ -363,7 +359,6 @@ CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_SIERRA_NET=m
CONFIG_WAN=y
-CONFIG_LANMEDIA=m
CONFIG_HDLC=m
CONFIG_HDLC_RAW=m
CONFIG_HDLC_RAW_ETH=m
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 1d55e57b8466..18f3d95ecfec 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -146,6 +146,8 @@
#define SO_TXREHASH 74
+#define SO_RCVMARK 75
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 654061e0964e..f486d3dfb6bb 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -127,6 +127,8 @@
#define SO_TXREHASH 0x4048
+#define SO_RCVMARK 0x4049
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index f42d9cd3b64d..2a3715bf29fe 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -535,6 +535,43 @@ static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
}
+static inline u32 rv_amoand_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0xc, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_amoor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x8, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_amoxor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x4, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_amoswap_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x1, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_lr_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x2, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_sc_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x3, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_fence(u8 pred, u8 succ)
+{
+ u16 imm11_0 = pred << 4 | succ;
+
+ return rv_i_insn(imm11_0, 0, 0, 0, 0xf);
+}
+
/* RVC instructions. */
static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
@@ -753,6 +790,36 @@ static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
}
+static inline u32 rv_amoand_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0xc, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_amoor_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x8, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_amoxor_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x4, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_amoswap_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x1, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_lr_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x2, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_sc_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0x3, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
/* RV64-only RVC instructions. */
static inline u16 rvc_ld(u8 rd, u32 imm8, u8 rs1)
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 0bcda99d1d68..00df3a8f92ac 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -455,6 +455,90 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
return 0;
}
+static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+ struct rv_jit_context *ctx)
+{
+ u8 r0;
+ int jmp_offset;
+
+ if (off) {
+ if (is_12b_int(off)) {
+ emit_addi(RV_REG_T1, rd, off, ctx);
+ } else {
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ }
+ rd = RV_REG_T1;
+ }
+
+ switch (imm) {
+ /* lock *(u32/u64 *)(dst_reg + off16) <op>= src_reg */
+ case BPF_ADD:
+ emit(is64 ? rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0) :
+ rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+ break;
+ case BPF_AND:
+ emit(is64 ? rv_amoand_d(RV_REG_ZERO, rs, rd, 0, 0) :
+ rv_amoand_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+ break;
+ case BPF_OR:
+ emit(is64 ? rv_amoor_d(RV_REG_ZERO, rs, rd, 0, 0) :
+ rv_amoor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+ break;
+ case BPF_XOR:
+ emit(is64 ? rv_amoxor_d(RV_REG_ZERO, rs, rd, 0, 0) :
+ rv_amoxor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+ break;
+ /* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
+ case BPF_ADD | BPF_FETCH:
+ emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
+ rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ case BPF_AND | BPF_FETCH:
+ emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
+ rv_amoand_w(rs, rs, rd, 0, 0), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ case BPF_OR | BPF_FETCH:
+ emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
+ rv_amoor_w(rs, rs, rd, 0, 0), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ case BPF_XOR | BPF_FETCH:
+ emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
+ rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ /* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
+ case BPF_XCHG:
+ emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
+ rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ /* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
+ case BPF_CMPXCHG:
+ r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
+ emit(is64 ? rv_addi(RV_REG_T2, r0, 0) :
+ rv_addiw(RV_REG_T2, r0, 0), ctx);
+ emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
+ rv_lr_w(r0, 0, rd, 0, 0), ctx);
+ jmp_offset = ninsns_rvoff(8);
+ emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
+ emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
+ rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
+ jmp_offset = ninsns_rvoff(-6);
+ emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
+ emit(rv_fence(0x3, 0x3), ctx);
+ break;
+ }
+}
+
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
@@ -1146,30 +1230,8 @@ out_be:
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
- if (insn->imm != BPF_ADD) {
- pr_err("bpf-jit: not supported: atomic operation %02x ***\n",
- insn->imm);
- return -EINVAL;
- }
-
- /* atomic_add: lock *(u32 *)(dst + off) += src
- * atomic_add: lock *(u64 *)(dst + off) += src
- */
-
- if (off) {
- if (is_12b_int(off)) {
- emit_addi(RV_REG_T1, rd, off, ctx);
- } else {
- emit_imm(RV_REG_T1, off, ctx);
- emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
- }
-
- rd = RV_REG_T1;
- }
-
- emit(BPF_SIZE(code) == BPF_W ?
- rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) :
- rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+ emit_atomic(rd, rs, off, imm,
+ BPF_SIZE(code) == BPF_DW, ctx);
break;
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 666f81e617ea..2fda57a3ea86 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -128,6 +128,7 @@
#define SO_TXREHASH 0x0053
+#define SO_RCVMARK 0x0054
#if !defined(__KERNEL__)
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index b9370bbca828..63cdb46a3439 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -146,36 +146,6 @@ config ATM_ENI_BURST_RX_2W
try this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or
8W are also set may or may not improve throughput.
-config ATM_FIRESTREAM
- tristate "Fujitsu FireStream (FS50/FS155) "
- depends on PCI && VIRT_TO_BUS
- help
- Driver for the Fujitsu FireStream 155 (MB86697) and
- FireStream 50 (MB86695) ATM PCI chips.
-
- To compile this driver as a module, choose M here: the module will
- be called firestream.
-
-config ATM_ZATM
- tristate "ZeitNet ZN1221/ZN1225"
- depends on PCI && VIRT_TO_BUS
- help
- Driver for the ZeitNet ZN1221 (MMF) and ZN1225 (UTP-5) 155 Mbps ATM
- adapters.
-
- To compile this driver as a module, choose M here: the module will
- be called zatm.
-
-config ATM_ZATM_DEBUG
- bool "Enable extended debugging"
- depends on ATM_ZATM
- help
- Extended debugging records various events and displays that list
- when an inconsistency is detected. This mechanism is faster than
- generally using printks, but still has some impact on performance.
- Note that extended debugging may create certain race conditions
- itself. Enable this ONLY if you suspect problems with the driver.
-
config ATM_NICSTAR
tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
depends on PCI
@@ -244,55 +214,6 @@ config ATM_IDT77252_USE_SUNI
depends on ATM_IDT77252
default y
-config ATM_AMBASSADOR
- tristate "Madge Ambassador (Collage PCI 155 Server)"
- depends on PCI && VIRT_TO_BUS
- select BITREVERSE
- help
- This is a driver for ATMizer based ATM card produced by Madge
- Networks Ltd. Say Y (or M to compile as a module named ambassador)
- here if you have one of these cards.
-
-config ATM_AMBASSADOR_DEBUG
- bool "Enable debugging messages"
- depends on ATM_AMBASSADOR
- help
- Somewhat useful debugging messages are available. The choice of
- messages is controlled by a bitmap. This may be specified as a
- module argument (kernel command line argument as well?), changed
- dynamically using an ioctl (not yet) or changed by sending the
- string "Dxxxx" to VCI 1023 (where x is a hex digit). See the file
- <file:drivers/atm/ambassador.h> for the meanings of the bits in the
- mask.
-
- When active, these messages can have a significant impact on the
- speed of the driver, and the size of your syslog files! When
- inactive, they will have only a modest impact on performance.
-
-config ATM_HORIZON
- tristate "Madge Horizon [Ultra] (Collage PCI 25 and Collage PCI 155 Client)"
- depends on PCI && VIRT_TO_BUS
- help
- This is a driver for the Horizon chipset ATM adapter cards once
- produced by Madge Networks Ltd. Say Y (or M to compile as a module
- named horizon) here if you have one of these cards.
-
-config ATM_HORIZON_DEBUG
- bool "Enable debugging messages"
- depends on ATM_HORIZON
- help
- Somewhat useful debugging messages are available. The choice of
- messages is controlled by a bitmap. This may be specified as a
- module argument (kernel command line argument as well?), changed
- dynamically using an ioctl (not yet) or changed by sending the
- string "Dxxxx" to VCI 1023 (where x is a hex digit). See the file
- <file:drivers/atm/horizon.h> for the meanings of the bits in the
- mask.
-
- When active, these messages can have a significant impact on the
- speed of the driver, and the size of your syslog files! When
- inactive, they will have only a modest impact on performance.
-
config ATM_IA
tristate "Interphase ATM PCI x575/x525/x531"
depends on PCI
diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile
index aa191616a72e..c9eade92019b 100644
--- a/drivers/atm/Makefile
+++ b/drivers/atm/Makefile
@@ -5,10 +5,7 @@
fore_200e-y := fore200e.o
-obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o
obj-$(CONFIG_ATM_NICSTAR) += nicstar.o
-obj-$(CONFIG_ATM_AMBASSADOR) += ambassador.o
-obj-$(CONFIG_ATM_HORIZON) += horizon.o
obj-$(CONFIG_ATM_IA) += iphase.o suni.o
obj-$(CONFIG_ATM_FORE200E) += fore_200e.o
obj-$(CONFIG_ATM_ENI) += eni.o suni.o
@@ -27,7 +24,6 @@ endif
obj-$(CONFIG_ATM_DUMMY) += adummy.o
obj-$(CONFIG_ATM_TCP) += atmtcp.o
-obj-$(CONFIG_ATM_FIRESTREAM) += firestream.o
obj-$(CONFIG_ATM_LANAI) += lanai.o
obj-$(CONFIG_ATM_HE) += he.o
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
deleted file mode 100644
index c039b8a4fefe..000000000000
--- a/drivers/atm/ambassador.c
+++ /dev/null
@@ -1,2400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Madge Ambassador ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/* * dedicated to the memory of Graham Gordon 1971-1998 * */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/atmdev.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/poison.h>
-#include <linux/bitrev.h>
-#include <linux/mutex.h>
-#include <linux/firmware.h>
-#include <linux/ihex.h>
-#include <linux/slab.h>
-
-#include <linux/atomic.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
-#include "ambassador.h"
-
-#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
-#define description_string "Madge ATM Ambassador driver"
-#define version_string "1.2.4"
-
-static inline void __init show_version (void) {
- printk ("%s version %s\n", description_string, version_string);
-}
-
-/*
-
- Theory of Operation
-
- I Hardware, detection, initialisation and shutdown.
-
- 1. Supported Hardware
-
- This driver is for the PCI ATMizer-based Ambassador card (except
- very early versions). It is not suitable for the similar EISA "TR7"
- card. Commercially, both cards are known as Collage Server ATM
- adapters.
-
- The loader supports image transfer to the card, image start and few
- other miscellaneous commands.
-
- Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023.
-
- The cards are big-endian.
-
- 2. Detection
-
- Standard PCI stuff, the early cards are detected and rejected.
-
- 3. Initialisation
-
- The cards are reset and the self-test results are checked. The
- microcode image is then transferred and started. This waits for a
- pointer to a descriptor containing details of the host-based queues
- and buffers and various parameters etc. Once they are processed
- normal operations may begin. The BIA is read using a microcode
- command.
-
- 4. Shutdown
-
- This may be accomplished either by a card reset or via the microcode
- shutdown command. Further investigation required.
-
- 5. Persistent state
-
- The card reset does not affect PCI configuration (good) or the
- contents of several other "shared run-time registers" (bad) which
- include doorbell and interrupt control as well as EEPROM and PCI
- control. The driver must be careful when modifying these registers
- not to touch bits it does not use and to undo any changes at exit.
-
- II Driver software
-
- 0. Generalities
-
- The adapter is quite intelligent (fast) and has a simple interface
- (few features). VPI is always zero, 1024 VCIs are supported. There
- is limited cell rate support. UBR channels can be capped and ABR
- (explicit rate, but not EFCI) is supported. There is no CBR or VBR
- support.
-
- 1. Driver <-> Adapter Communication
-
- Apart from the basic loader commands, the driver communicates
- through three entities: the command queue (CQ), the transmit queue
- pair (TXQ) and the receive queue pairs (RXQ). These three entities
- are set up by the host and passed to the microcode just after it has
- been started.
-
- All queues are host-based circular queues. They are contiguous and
- (due to hardware limitations) have some restrictions as to their
- locations in (bus) memory. They are of the "full means the same as
- empty so don't do that" variety since the adapter uses pointers
- internally.
-
- The queue pairs work as follows: one queue is for supply to the
- adapter, items in it are pending and are owned by the adapter; the
- other is the queue for return from the adapter, items in it have
- been dealt with by the adapter. The host adds items to the supply
- (TX descriptors and free RX buffer descriptors) and removes items
- from the return (TX and RX completions). The adapter deals with out
- of order completions.
-
- Interrupts (card to host) and the doorbell (host to card) are used
- for signalling.
-
- 1. CQ
-
- This is to communicate "open VC", "close VC", "get stats" etc. to
- the adapter. At most one command is retired every millisecond by the
- card. There is no out of order completion or notification. The
- driver needs to check the return code of the command, waiting as
- appropriate.
-
- 2. TXQ
-
- TX supply items are of variable length (scatter gather support) and
- so the queue items are (more or less) pointers to the real thing.
- Each TX supply item contains a unique, host-supplied handle (the skb
- bus address seems most sensible as this works for Alphas as well,
- there is no need to do any endian conversions on the handles).
-
- TX return items consist of just the handles above.
-
- 3. RXQ (up to 4 of these with different lengths and buffer sizes)
-
- RX supply items consist of a unique, host-supplied handle (the skb
- bus address again) and a pointer to the buffer data area.
-
- RX return items consist of the handle above, the VC, length and a
- status word. This just screams "oh so easy" doesn't it?
-
- Note on RX pool sizes:
-
- Each pool should have enough buffers to handle a back-to-back stream
- of minimum sized frames on a single VC. For example:
-
- frame spacing = 3us (about right)
-
- delay = IRQ lat + RX handling + RX buffer replenish = 20 (us) (a guess)
-
- min number of buffers for one VC = 1 + delay/spacing (buffers)
-
- delay/spacing = latency = (20+2)/3 = 7 (buffers) (rounding up)
-
- The 20us delay assumes that there is no need to sleep; if we need to
- sleep to get buffers we are going to drop frames anyway.
-
- In fact, each pool should have enough buffers to support the
- simultaneous reassembly of a separate frame on each VC and cope with
- the case in which frames complete in round robin cell fashion on
- each VC.
-
- Only one frame can complete at each cell arrival, so if "n" VCs are
- open, the worst case is to have them all complete frames together
- followed by all starting new frames together.
-
- desired number of buffers = n + delay/spacing
-
- These are the extreme requirements, however, they are "n+k" for some
- "k" so we have only the constant to choose. This is the argument
- rx_lats which current defaults to 7.
-
- Actually, "n ? n+k : 0" is better and this is what is implemented,
- subject to the limit given by the pool size.
-
- 4. Driver locking
-
- Simple spinlocks are used around the TX and RX queue mechanisms.
- Anyone with a faster, working method is welcome to implement it.
-
- The adapter command queue is protected with a spinlock. We always
- wait for commands to complete.
-
- A more complex form of locking is used around parts of the VC open
- and close functions. There are three reasons for a lock: 1. we need
- to do atomic rate reservation and release (not used yet), 2. Opening
- sometimes involves two adapter commands which must not be separated
- by another command on the same VC, 3. the changes to RX pool size
- must be atomic. The lock needs to work over context switches, so we
- use a semaphore.
-
- III Hardware Features and Microcode Bugs
-
- 1. Byte Ordering
-
- *%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*!
-
- 2. Memory access
-
- All structures that are not accessed using DMA must be 4-byte
- aligned (not a problem) and must not cross 4MB boundaries.
-
- There is a DMA memory hole at E0000000-E00000FF (groan).
-
- TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB
- but for a hardware bug).
-
- RX buffers (DMA write) must not cross 16MB boundaries and must
- include spare trailing bytes up to the next 4-byte boundary; they
- will be written with rubbish.
-
- The PLX likes to prefetch; if reading up to 4 u32 past the end of
- each TX fragment is not a problem, then TX can be made to go a
- little faster by passing a flag at init that disables a prefetch
- workaround. We do not pass this flag. (new microcode only)
-
- Now we:
- . Note that alloc_skb rounds up size to a 16byte boundary.
- . Ensure all areas do not traverse 4MB boundaries.
- . Ensure all areas do not start at a E00000xx bus address.
- (I cannot be certain, but this may always hold with Linux)
- . Make all failures cause a loud message.
- . Discard non-conforming SKBs (causes TX failure or RX fill delay).
- . Discard non-conforming TX fragment descriptors (the TX fails).
- In the future we could:
- . Allow RX areas that traverse 4MB (but not 16MB) boundaries.
- . Segment TX areas into some/more fragments, when necessary.
- . Relax checks for non-DMA items (ignore hole).
- . Give scatter-gather (iovec) requirements using ???. (?)
-
- 3. VC close is broken (only for new microcode)
-
- The VC close adapter microcode command fails to do anything if any
- frames have been received on the VC but none have been transmitted.
- Frames continue to be reassembled and passed (with IRQ) to the
- driver.
-
- IV To Do List
-
- . Fix bugs!
-
- . Timer code may be broken.
-
- . Deal with buggy VC close (somehow) in microcode 12.
-
- . Handle interrupted and/or non-blocking writes - is this a job for
- the protocol layer?
-
- . Add code to break up TX fragments when they span 4MB boundaries.
-
- . Add SUNI phy layer (need to know where SUNI lives on card).
-
- . Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b)
- leave extra headroom space for Ambassador TX descriptors.
-
- . Understand these elements of struct atm_vcc: recvq (proto?),
- sleep, callback, listenq, backlog_quota, reply and user_back.
-
- . Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable).
-
- . Impose a TX-pending limit (2?) on each VC, help avoid TX q overflow.
-
- . Decide whether RX buffer recycling is or can be made completely safe;
- turn it back on. It looks like Werner is going to axe this.
-
- . Implement QoS changes on open VCs (involves extracting parts of VC open
- and close into separate functions and using them to make changes).
-
- . Hack on command queue so that someone can issue multiple commands and wait
- on the last one (OR only "no-op" or "wait" commands are waited for).
-
- . Eliminate need for while-schedule around do_command.
-
-*/
-
-static void do_housekeeping (struct timer_list *t);
-/********** globals **********/
-
-static unsigned short debug = 0;
-static unsigned int cmds = 8;
-static unsigned int txs = 32;
-static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 };
-static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 };
-static unsigned int rx_lats = 7;
-static unsigned char pci_lat = 0;
-
-static const unsigned long onegigmask = -1 << 30;
-
-/********** access to adapter **********/
-
-static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) {
- PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x", addr, data);
-#ifdef AMB_MMIO
- dev->membase[addr / sizeof(u32)] = data;
-#else
- outl (data, dev->iobase + addr);
-#endif
-}
-
-static inline u32 rd_plain (const amb_dev * dev, size_t addr) {
-#ifdef AMB_MMIO
- u32 data = dev->membase[addr / sizeof(u32)];
-#else
- u32 data = inl (dev->iobase + addr);
-#endif
- PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x", addr, data);
- return data;
-}
-
-static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) {
- __be32 be = cpu_to_be32 (data);
- PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x b[%08x]", addr, data, be);
-#ifdef AMB_MMIO
- dev->membase[addr / sizeof(u32)] = be;
-#else
- outl (be, dev->iobase + addr);
-#endif
-}
-
-static inline u32 rd_mem (const amb_dev * dev, size_t addr) {
-#ifdef AMB_MMIO
- __be32 be = dev->membase[addr / sizeof(u32)];
-#else
- __be32 be = inl (dev->iobase + addr);
-#endif
- u32 data = be32_to_cpu (be);
- PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x b[%08x]", addr, data, be);
- return data;
-}
-
-/********** dump routines **********/
-
-static inline void dump_registers (const amb_dev * dev) {
-#ifdef DEBUG_AMBASSADOR
- if (debug & DBG_REGS) {
- size_t i;
- PRINTD (DBG_REGS, "reading PLX control: ");
- for (i = 0x00; i < 0x30; i += sizeof(u32))
- rd_mem (dev, i);
- PRINTD (DBG_REGS, "reading mailboxes: ");
- for (i = 0x40; i < 0x60; i += sizeof(u32))
- rd_mem (dev, i);
- PRINTD (DBG_REGS, "reading doorb irqev irqen reset:");
- for (i = 0x60; i < 0x70; i += sizeof(u32))
- rd_mem (dev, i);
- }
-#else
- (void) dev;
-#endif
- return;
-}
-
-static inline void dump_loader_block (volatile loader_block * lb) {
-#ifdef DEBUG_AMBASSADOR
- unsigned int i;
- PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:",
- lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command));
- for (i = 0; i < MAX_COMMAND_DATA; ++i)
- PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i]));
- PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid));
-#else
- (void) lb;
-#endif
- return;
-}
-
-static inline void dump_command (command * cmd) {
-#ifdef DEBUG_AMBASSADOR
- unsigned int i;
- PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:",
- cmd, /*be32_to_cpu*/ (cmd->request));
- for (i = 0; i < 3; ++i)
- PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i]));
- PRINTDE (DBG_CMD, "");
-#else
- (void) cmd;
-#endif
- return;
-}
-
-static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
-#ifdef DEBUG_AMBASSADOR
- unsigned int i;
- unsigned char * data = skb->data;
- PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
- for (i=0; i<skb->len && i < 256;i++)
- PRINTDM (DBG_DATA, "%02x ", data[i]);
- PRINTDE (DBG_DATA,"");
-#else
- (void) prefix;
- (void) vc;
- (void) skb;
-#endif
- return;
-}
-
-/********** check memory areas for use by Ambassador **********/
-
-/* see limitations under Hardware Features */
-
-static int check_area (void * start, size_t length) {
- // assumes length > 0
- const u32 fourmegmask = -1 << 22;
- const u32 twofivesixmask = -1 << 8;
- const u32 starthole = 0xE0000000;
- u32 startaddress = virt_to_bus (start);
- u32 lastaddress = startaddress+length-1;
- if ((startaddress ^ lastaddress) & fourmegmask ||
- (startaddress & twofivesixmask) == starthole) {
- PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!",
- startaddress, lastaddress);
- return -1;
- } else {
- return 0;
- }
-}
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-static void amb_kfree_skb (struct sk_buff * skb) {
- if (ATM_SKB(skb)->vcc->pop) {
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- } else {
- dev_kfree_skb_any (skb);
- }
-}
-
-/********** TX completion **********/
-
-static void tx_complete (amb_dev * dev, tx_out * tx) {
- tx_simple * tx_descr = bus_to_virt (tx->handle);
- struct sk_buff * skb = tx_descr->skb;
-
- PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
-
- // VC layer stats
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- // free the descriptor
- kfree (tx_descr);
-
- // free the skb
- amb_kfree_skb (skb);
-
- dev->stats.tx_ok++;
- return;
-}
-
-/********** RX completion **********/
-
-static void rx_complete (amb_dev * dev, rx_out * rx) {
- struct sk_buff * skb = bus_to_virt (rx->handle);
- u16 vc = be16_to_cpu (rx->vc);
- // unused: u16 lec_id = be16_to_cpu (rx->lec_id);
- u16 status = be16_to_cpu (rx->status);
- u16 rx_len = be16_to_cpu (rx->length);
-
- PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len);
-
- // XXX move this in and add to VC stats ???
- if (!status) {
- struct atm_vcc * atm_vcc = dev->rxer[vc];
- dev->stats.rx.ok++;
-
- if (atm_vcc) {
-
- if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
-
- if (atm_charge (atm_vcc, skb->truesize)) {
-
- // prepare socket buffer
- ATM_SKB(skb)->vcc = atm_vcc;
- skb_put (skb, rx_len);
-
- dump_skb ("<<<", vc, skb);
-
- // VC layer stats
- atomic_inc(&atm_vcc->stats->rx);
- __net_timestamp(skb);
- // end of our responsibility
- atm_vcc->push (atm_vcc, skb);
- return;
-
- } else {
- // someone fix this (message), please!
- PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize);
- // drop stats incremented in atm_charge
- }
-
- } else {
- PRINTK (KERN_INFO, "dropped over-size frame");
- // should we count this?
- atomic_inc(&atm_vcc->stats->rx_drop);
- }
-
- } else {
- PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc);
- // this is an adapter bug, only in new version of microcode
- }
-
- } else {
- dev->stats.rx.error++;
- if (status & CRC_ERR)
- dev->stats.rx.badcrc++;
- if (status & LEN_ERR)
- dev->stats.rx.toolong++;
- if (status & ABORT_ERR)
- dev->stats.rx.aborted++;
- if (status & UNUSED_ERR)
- dev->stats.rx.unused++;
- }
-
- dev_kfree_skb_any (skb);
- return;
-}
-
-/*
-
- Note on queue handling.
-
- Here "give" and "take" refer to queue entries and a queue (pair)
- rather than frames to or from the host or adapter. Empty frame
- buffers are given to the RX queue pair and returned unused or
- containing RX frames. TX frames (well, pointers to TX fragment
- lists) are given to the TX queue pair, completions are returned.
-
-*/
-
-/********** command queue **********/
-
-// I really don't like this, but it's the best I can do at the moment
-
-// also, the callers are responsible for byte order as the microcode
-// sometimes does 16-bit accesses (yuk yuk yuk)
-
-static int command_do (amb_dev * dev, command * cmd) {
- amb_cq * cq = &dev->cq;
- volatile amb_cq_ptrs * ptrs = &cq->ptrs;
- command * my_slot;
-
- PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev);
-
- if (test_bit (dead, &dev->flags))
- return 0;
-
- spin_lock (&cq->lock);
-
- // if not full...
- if (cq->pending < cq->maximum) {
- // remember my slot for later
- my_slot = ptrs->in;
- PRINTD (DBG_CMD, "command in slot %p", my_slot);
-
- dump_command (cmd);
-
- // copy command in
- *ptrs->in = *cmd;
- cq->pending++;
- ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit);
-
- // mail the command
- wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in));
-
- if (cq->pending > cq->high)
- cq->high = cq->pending;
- spin_unlock (&cq->lock);
-
- // these comments were in a while-loop before, msleep removes the loop
- // go to sleep
- // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout);
- msleep(cq->pending);
-
- // wait for my slot to be reached (all waiters are here or above, until...)
- while (ptrs->out != my_slot) {
- PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
-
- // wait on my slot (... one gets to its slot, and... )
- while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) {
- PRINTD (DBG_CMD, "wait: command slot completion");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
-
- PRINTD (DBG_CMD, "command complete");
- // update queue (... moves the queue along to the next slot)
- spin_lock (&cq->lock);
- cq->pending--;
- // copy command out
- *cmd = *ptrs->out;
- ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit);
- spin_unlock (&cq->lock);
-
- return 0;
- } else {
- cq->filled++;
- spin_unlock (&cq->lock);
- return -EAGAIN;
- }
-
-}
-
-/********** TX queue pair **********/
-
-static int tx_give (amb_dev * dev, tx_in * tx) {
- amb_txq * txq = &dev->txq;
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev);
-
- if (test_bit (dead, &dev->flags))
- return 0;
-
- spin_lock_irqsave (&txq->lock, flags);
-
- if (txq->pending < txq->maximum) {
- PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);
-
- *txq->in.ptr = *tx;
- txq->pending++;
- txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);
- // hand over the TX and ring the bell
- wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));
- wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME);
-
- if (txq->pending > txq->high)
- txq->high = txq->pending;
- spin_unlock_irqrestore (&txq->lock, flags);
- return 0;
- } else {
- txq->filled++;
- spin_unlock_irqrestore (&txq->lock, flags);
- return -EAGAIN;
- }
-}
-
-static int tx_take (amb_dev * dev) {
- amb_txq * txq = &dev->txq;
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev);
-
- spin_lock_irqsave (&txq->lock, flags);
-
- if (txq->pending && txq->out.ptr->handle) {
- // deal with TX completion
- tx_complete (dev, txq->out.ptr);
- // mark unused again
- txq->out.ptr->handle = 0;
- // remove item
- txq->pending--;
- txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit);
-
- spin_unlock_irqrestore (&txq->lock, flags);
- return 0;
- } else {
-
- spin_unlock_irqrestore (&txq->lock, flags);
- return -1;
- }
-}
-
-/********** RX queue pairs **********/
-
-static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
- amb_rxq * rxq = &dev->rxq[pool];
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);
-
- spin_lock_irqsave (&rxq->lock, flags);
-
- if (rxq->pending < rxq->maximum) {
- PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);
-
- *rxq->in.ptr = *rx;
- rxq->pending++;
- rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
- // hand over the RX buffer
- wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));
-
- spin_unlock_irqrestore (&rxq->lock, flags);
- return 0;
- } else {
- spin_unlock_irqrestore (&rxq->lock, flags);
- return -1;
- }
-}
-
-static int rx_take (amb_dev * dev, unsigned char pool) {
- amb_rxq * rxq = &dev->rxq[pool];
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);
-
- spin_lock_irqsave (&rxq->lock, flags);
-
- if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
- // deal with RX completion
- rx_complete (dev, rxq->out.ptr);
- // mark unused again
- rxq->out.ptr->status = 0;
- rxq->out.ptr->length = 0;
- // remove item
- rxq->pending--;
- rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);
-
- if (rxq->pending < rxq->low)
- rxq->low = rxq->pending;
- spin_unlock_irqrestore (&rxq->lock, flags);
- return 0;
- } else {
- if (!rxq->pending && rxq->buffers_wanted)
- rxq->emptied++;
- spin_unlock_irqrestore (&rxq->lock, flags);
- return -1;
- }
-}
-
-/********** RX Pool handling **********/
-
-/* pre: buffers_wanted = 0, post: pending = 0 */
-static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
- amb_rxq * rxq = &dev->rxq[pool];
-
- PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
-
- if (test_bit (dead, &dev->flags))
- return;
-
- /* we are not quite like the fill pool routines as we cannot just
- remove one buffer, we have to remove all of them, but we might as
- well pretend... */
- if (rxq->pending > rxq->buffers_wanted) {
- command cmd;
- cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q);
- cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
- while (command_do (dev, &cmd))
- schedule();
- /* the pool may also be emptied via the interrupt handler */
- while (rxq->pending > rxq->buffers_wanted)
- if (rx_take (dev, pool))
- schedule();
- }
-
- return;
-}
-
-static void drain_rx_pools (amb_dev * dev) {
- unsigned char pool;
-
- PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- drain_rx_pool (dev, pool);
-}
-
-static void fill_rx_pool (amb_dev * dev, unsigned char pool,
- gfp_t priority)
-{
- rx_in rx;
- amb_rxq * rxq;
-
- PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);
-
- if (test_bit (dead, &dev->flags))
- return;
-
- rxq = &dev->rxq[pool];
- while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {
-
- struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
- if (!skb) {
- PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
- return;
- }
- if (check_area (skb->data, skb->truesize)) {
- dev_kfree_skb_any (skb);
- return;
- }
- // cast needed as there is no %? for pointer differences
- PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
- skb, skb->head, (long) skb_end_offset(skb));
- rx.handle = virt_to_bus (skb);
- rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
- if (rx_give (dev, &rx, pool))
- dev_kfree_skb_any (skb);
-
- }
-
- return;
-}
-
-// top up all RX pools
-static void fill_rx_pools (amb_dev * dev) {
- unsigned char pool;
-
- PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- fill_rx_pool (dev, pool, GFP_ATOMIC);
-
- return;
-}
-
-/********** enable host interrupts **********/
-
-static void interrupts_on (amb_dev * dev) {
- wr_plain (dev, offsetof(amb_mem, interrupt_control),
- rd_plain (dev, offsetof(amb_mem, interrupt_control))
- | AMB_INTERRUPT_BITS);
-}
-
-/********** disable host interrupts **********/
-
-static void interrupts_off (amb_dev * dev) {
- wr_plain (dev, offsetof(amb_mem, interrupt_control),
- rd_plain (dev, offsetof(amb_mem, interrupt_control))
- &~ AMB_INTERRUPT_BITS);
-}
-
-/********** interrupt handling **********/
-
-static irqreturn_t interrupt_handler(int irq, void *dev_id) {
- amb_dev * dev = dev_id;
-
- PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id);
-
- {
- u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt));
-
- // for us or someone else sharing the same interrupt
- if (!interrupt) {
- PRINTD (DBG_IRQ, "irq not for me: %d", irq);
- return IRQ_NONE;
- }
-
- // definitely for us
- PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt);
- wr_plain (dev, offsetof(amb_mem, interrupt), -1);
- }
-
- {
- unsigned int irq_work = 0;
- unsigned char pool;
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- while (!rx_take (dev, pool))
- ++irq_work;
- while (!tx_take (dev))
- ++irq_work;
-
- if (irq_work) {
- fill_rx_pools (dev);
-
- PRINTD (DBG_IRQ, "work done: %u", irq_work);
- } else {
- PRINTD (DBG_IRQ|DBG_WARN, "no work done");
- }
- }
-
- PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
- return IRQ_HANDLED;
-}
-
-/********** make rate (not quite as much fun as Horizon) **********/
-
-static int make_rate (unsigned int rate, rounding r,
- u16 * bits, unsigned int * actual) {
- unsigned char exp = -1; // hush gcc
- unsigned int man = -1; // hush gcc
-
- PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate);
-
- // rates in cells per second, ITU format (nasty 16-bit floating-point)
- // given 5-bit e and 9-bit m:
- // rate = EITHER (1+m/2^9)*2^e OR 0
- // bits = EITHER 1<<14 | e<<9 | m OR 0
- // (bit 15 is "reserved", bit 14 "non-zero")
- // smallest rate is 0 (special representation)
- // largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
- // smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
- // simple algorithm:
- // find position of top bit, this gives e
- // remove top bit and shift (rounding if feeling clever) by 9-e
-
- // ucode bug: please don't set bit 14! so 0 rate not representable
-
- if (rate > 0xffc00000U) {
- // larger than largest representable rate
-
- if (r == round_up) {
- return -EINVAL;
- } else {
- exp = 31;
- man = 511;
- }
-
- } else if (rate) {
- // representable rate
-
- exp = 31;
- man = rate;
-
- // invariant: rate = man*2^(exp-31)
- while (!(man & (1<<31))) {
- exp = exp - 1;
- man = man<<1;
- }
-
- // man has top bit set
- // rate = (2^31+(man-2^31))*2^(exp-31)
- // rate = (1+(man-2^31)/2^31)*2^exp
- man = man<<1;
- man &= 0xffffffffU; // a nop on 32-bit systems
- // rate = (1+man/2^32)*2^exp
-
- // exp is in the range 0 to 31, man is in the range 0 to 2^32-1
- // time to lose significance... we want m in the range 0 to 2^9-1
- // rounding presents a minor problem... we first decide which way
- // we are rounding (based on given rounding direction and possibly
- // the bits of the mantissa that are to be discarded).
-
- switch (r) {
- case round_down: {
- // just truncate
- man = man>>(32-9);
- break;
- }
- case round_up: {
- // check all bits that we are discarding
- if (man & (~0U>>9)) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- // no need to check for round up outside of range
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- case round_nearest: {
- // check msb that we are discarding
- if (man & (1<<(32-9-1))) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- // no need to check for round up outside of range
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- }
-
- } else {
- // zero rate - not representable
-
- if (r == round_down) {
- return -EINVAL;
- } else {
- exp = 0;
- man = 0;
- }
-
- }
-
- PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp);
-
- if (bits)
- *bits = /* (1<<14) | */ (exp<<9) | man;
-
- if (actual)
- *actual = (exp >= 9)
- ? (1 << exp) + (man << (exp-9))
- : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
-
- return 0;
-}
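/*
 * Editor's worked example for the encoding above, not part of the original
 * driver.  For rate = 100000 cells/s the top bit sits at position 16, so
 * exp = 16; the remaining fraction truncates (round_down) to man = 269,
 * giving bits = (16 << 9) | 269 = 0x210d (bit 14 deliberately left clear
 * because of the microcode bug noted above).  Decoding that back yields
 * (1 + 269/512) * 2^16 = 99968 cells/s, which is what make_rate() would
 * report via *actual.  The helper name example_decode_rate is invented.
 */
static unsigned int example_decode_rate(u16 bits)
{
	unsigned int exp = (bits >> 9) & 0x1f;	/* 5-bit exponent */
	unsigned int man = bits & 0x1ff;	/* 9-bit mantissa */

	/* rate = (1 + man/2^9) * 2^exp, rounded as in make_rate()'s *actual */
	return (exp >= 9)
		? (1 << exp) + (man << (exp - 9))
		: (1 << exp) + ((man + (1 << (9 - exp - 1))) >> (9 - exp));
}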
-
-/********** Linux ATM Operations **********/
-
-// some are not yet implemented while others do not make sense for
-// this device
-
-/********** Open a VC **********/
-
-static int amb_open (struct atm_vcc * atm_vcc)
-{
- int error;
-
- struct atm_qos * qos;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
- u16 tx_rate_bits = -1; // hush gcc
- u16 tx_vc_bits = -1; // hush gcc
- u16 tx_frame_bits = -1; // hush gcc
-
- amb_dev * dev = AMB_DEV(atm_vcc->dev);
- amb_vcc * vcc;
- unsigned char pool = -1; // hush gcc
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
-
- PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci);
-
-#ifdef ATM_VPI_UNSPEC
- // UNSPEC is deprecated, remove this code eventually
- if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
- PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
- return -EINVAL;
- }
-#endif
-
- if (!(0 <= vpi && vpi < (1<<NUM_VPI_BITS) &&
- 0 <= vci && vci < (1<<NUM_VCI_BITS))) {
- PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
- return -EINVAL;
- }
-
- qos = &atm_vcc->qos;
-
- if (qos->aal != ATM_AAL5) {
- PRINTD (DBG_QOS, "AAL not supported");
- return -EINVAL;
- }
-
- // traffic parameters
-
- PRINTD (DBG_QOS, "TX:");
- txtp = &qos->txtp;
- if (txtp->traffic_class != ATM_NONE) {
- switch (txtp->traffic_class) {
- case ATM_UBR: {
- // we take "the PCR" as a rate-cap
- int pcr = atm_pcr_goal (txtp);
- if (!pcr) {
- // no rate cap
- tx_rate_bits = 0;
- tx_vc_bits = TX_UBR;
- tx_frame_bits = TX_FRAME_NOTCAP;
- } else {
- rounding r;
- if (pcr < 0) {
- r = round_down;
- pcr = -pcr;
- } else {
- r = round_up;
- }
- error = make_rate (pcr, r, &tx_rate_bits, NULL);
- if (error)
- return error;
- tx_vc_bits = TX_UBR_CAPPED;
- tx_frame_bits = TX_FRAME_CAPPED;
- }
- break;
- }
-#if 0
- case ATM_ABR: {
- pcr = atm_pcr_goal (txtp);
- PRINTD (DBG_QOS, "pcr goal = %d", pcr);
- break;
- }
-#endif
- default: {
- // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
- PRINTD (DBG_QOS, "request for non-UBR denied");
- return -EINVAL;
- }
- }
- PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx",
- tx_rate_bits, tx_vc_bits);
- }
-
- PRINTD (DBG_QOS, "RX:");
- rxtp = &qos->rxtp;
- if (rxtp->traffic_class == ATM_NONE) {
- // do nothing
- } else {
- // choose an RX pool (arranged in increasing size)
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
- PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)",
- pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
- break;
- }
- if (pool == NUM_RX_POOLS) {
- PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL,
- "no pool suitable for VC (RX max_sdu %d is too large)",
- rxtp->max_sdu);
- return -EINVAL;
- }
-
- switch (rxtp->traffic_class) {
- case ATM_UBR: {
- break;
- }
-#if 0
- case ATM_ABR: {
- pcr = atm_pcr_goal (rxtp);
- PRINTD (DBG_QOS, "pcr goal = %d", pcr);
- break;
- }
-#endif
- default: {
- // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
- PRINTD (DBG_QOS, "request for non-UBR denied");
- return -EINVAL;
- }
- }
- }
-
- // get space for our vcc stuff
- vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL);
- if (!vcc) {
- PRINTK (KERN_ERR, "out of memory!");
- return -ENOMEM;
- }
- atm_vcc->dev_data = (void *) vcc;
-
- // no failures beyond this point
-
- // we are not really "immediately before allocating the connection
- // identifier in hardware", but it will just have to do!
- set_bit(ATM_VF_ADDR,&atm_vcc->flags);
-
- if (txtp->traffic_class != ATM_NONE) {
- command cmd;
-
- vcc->tx_frame_bits = tx_frame_bits;
-
- mutex_lock(&dev->vcc_sf);
- if (dev->rxer[vci]) {
- // RXer on the channel already, just modify rate...
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
- cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
- while (command_do (dev, &cmd))
- schedule();
- // ... and TX flags, preserving the RX pool
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
- cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_flags.flags = cpu_to_be32
- ( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
- | (tx_vc_bits << SRB_FLAGS_SHIFT) );
- while (command_do (dev, &cmd))
- schedule();
- } else {
- // no RXer on the channel, just open (with pool zero)
- cmd.request = cpu_to_be32 (SRB_OPEN_VC);
- cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT);
- cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
- while (command_do (dev, &cmd))
- schedule();
- }
- dev->txer[vci].tx_present = 1;
- mutex_unlock(&dev->vcc_sf);
- }
-
- if (rxtp->traffic_class != ATM_NONE) {
- command cmd;
-
- vcc->rx_info.pool = pool;
-
- mutex_lock(&dev->vcc_sf);
- /* grow RX buffer pool */
- if (!dev->rxq[pool].buffers_wanted)
- dev->rxq[pool].buffers_wanted = rx_lats;
- dev->rxq[pool].buffers_wanted += 1;
- fill_rx_pool (dev, pool, GFP_KERNEL);
-
- if (dev->txer[vci].tx_present) {
- // TXer on the channel already
- // switch (from pool zero) to this pool, preserving the TX bits
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
- cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_flags.flags = cpu_to_be32
- ( (pool << SRB_POOL_SHIFT)
- | (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) );
- } else {
- // no TXer on the channel, open the VC (with no rate info)
- cmd.request = cpu_to_be32 (SRB_OPEN_VC);
- cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
- cmd.args.open.rate = cpu_to_be32 (0);
- }
- while (command_do (dev, &cmd))
- schedule();
- // this link allows RX frames through
- dev->rxer[vci] = atm_vcc;
- mutex_unlock(&dev->vcc_sf);
- }
-
- // indicate readiness
- set_bit(ATM_VF_READY,&atm_vcc->flags);
-
- return 0;
-}
-
-/********** Close a VC **********/
-
-static void amb_close (struct atm_vcc * atm_vcc) {
- amb_dev * dev = AMB_DEV (atm_vcc->dev);
- amb_vcc * vcc = AMB_VCC (atm_vcc);
- u16 vci = atm_vcc->vci;
-
- PRINTD (DBG_VCC|DBG_FLOW, "amb_close");
-
- // indicate unreadiness
- clear_bit(ATM_VF_READY,&atm_vcc->flags);
-
- // disable TXing
- if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
- command cmd;
-
- mutex_lock(&dev->vcc_sf);
- if (dev->rxer[vci]) {
- // RXer still on the channel, just modify rate... XXX not really needed
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
- cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_rate.rate = cpu_to_be32 (0);
- // ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool
- } else {
- // no RXer on the channel, close channel
- cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
- cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
- }
- dev->txer[vci].tx_present = 0;
- while (command_do (dev, &cmd))
- schedule();
- mutex_unlock(&dev->vcc_sf);
- }
-
- // disable RXing
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
- command cmd;
-
- // this is (the?) one reason why we need the amb_vcc struct
- unsigned char pool = vcc->rx_info.pool;
-
- mutex_lock(&dev->vcc_sf);
- if (dev->txer[vci].tx_present) {
- // TXer still on the channel, just go to pool zero XXX not really needed
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
- cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_flags.flags = cpu_to_be32
- (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT);
- } else {
- // no TXer on the channel, close the VC
- cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
- cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
- }
- // forget the rxer - no more skbs will be pushed
- if (atm_vcc != dev->rxer[vci])
- PRINTK (KERN_ERR, "%s vcc=%p rxer[vci]=%p",
- "arghhh! we're going to die!",
- vcc, dev->rxer[vci]);
- dev->rxer[vci] = NULL;
- while (command_do (dev, &cmd))
- schedule();
-
- /* shrink RX buffer pool */
- dev->rxq[pool].buffers_wanted -= 1;
- if (dev->rxq[pool].buffers_wanted == rx_lats) {
- dev->rxq[pool].buffers_wanted = 0;
- drain_rx_pool (dev, pool);
- }
- mutex_unlock(&dev->vcc_sf);
- }
-
- // free our structure
- kfree (vcc);
-
- // say the VPI/VCI is free again
- clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
-
- return;
-}
-
-/********** Send **********/
-
-static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- amb_dev * dev = AMB_DEV(atm_vcc->dev);
- amb_vcc * vcc = AMB_VCC(atm_vcc);
- u16 vc = atm_vcc->vci;
- unsigned int tx_len = skb->len;
- unsigned char * tx_data = skb->data;
- tx_simple * tx_descr;
- tx_in tx;
-
- if (test_bit (dead, &dev->flags))
- return -EIO;
-
- PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u",
- vc, tx_data, tx_len);
-
- dump_skb (">>>", vc, skb);
-
- if (!dev->txer[vc].tx_present) {
- PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc);
- return -EBADFD;
- }
-
- // this is a driver private field so we have to set it ourselves,
- // despite the fact that we are _required_ to use it to check for a
- // pop function
- ATM_SKB(skb)->vcc = atm_vcc;
-
- if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) {
- PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
- return -EIO;
- }
-
- if (check_area (skb->data, skb->len)) {
- atomic_inc(&atm_vcc->stats->tx_err);
- return -ENOMEM; // ?
- }
-
- // allocate memory for fragments
- tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL);
- if (!tx_descr) {
- PRINTK (KERN_ERR, "could not allocate TX descriptor");
- return -ENOMEM;
- }
- if (check_area (tx_descr, sizeof(tx_simple))) {
- kfree (tx_descr);
- return -ENOMEM;
- }
- PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr);
-
- tx_descr->skb = skb;
-
- tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len);
- tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data));
-
- tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr);
- tx_descr->tx_frag_end.vc = 0;
- tx_descr->tx_frag_end.next_descriptor_length = 0;
- tx_descr->tx_frag_end.next_descriptor = 0;
-#ifdef AMB_NEW_MICROCODE
- tx_descr->tx_frag_end.cpcs_uu = 0;
- tx_descr->tx_frag_end.cpi = 0;
- tx_descr->tx_frag_end.pad = 0;
-#endif
-
- tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc);
- tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end));
- tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag));
-
- while (tx_give (dev, &tx))
- schedule();
- return 0;
-}
-
-/********** Change QoS on a VC **********/
-
-// int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags);
-
-/********** Free RX Socket Buffer **********/
-
-#if 0
-static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- amb_dev * dev = AMB_DEV (atm_vcc->dev);
- amb_vcc * vcc = AMB_VCC (atm_vcc);
- unsigned char pool = vcc->rx_info.pool;
- rx_in rx;
-
- // This may be unsafe for various reasons that I cannot really guess
- // at. However, I note that the ATM layer calls kfree_skb rather
-  // than dev_kfree_skb at this point so we are at least covered as far
- // as buffer locking goes. There may be bugs if pcap clones RX skbs.
-
- PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)",
- skb, atm_vcc, vcc);
-
- rx.handle = virt_to_bus (skb);
- rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
-
- skb->data = skb->head;
- skb_reset_tail_pointer(skb);
- skb->len = 0;
-
- if (!rx_give (dev, &rx, pool)) {
- // success
- PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
- return;
- }
-
- // just do what the ATM layer would have done
- dev_kfree_skb_any (skb);
-
- return;
-}
-#endif
-
-/********** Proc File Output **********/
-
-static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
- amb_dev * dev = AMB_DEV (atm_dev);
- int left = *pos;
- unsigned char pool;
-
- PRINTD (DBG_FLOW, "amb_proc_read");
-
- /* more diagnostics here? */
-
- if (!left--) {
- amb_stats * s = &dev->stats;
- return sprintf (page,
- "frames: TX OK %lu, RX OK %lu, RX bad %lu "
- "(CRC %lu, long %lu, aborted %lu, unused %lu).\n",
- s->tx_ok, s->rx.ok, s->rx.error,
- s->rx.badcrc, s->rx.toolong,
- s->rx.aborted, s->rx.unused);
- }
-
- if (!left--) {
- amb_cq * c = &dev->cq;
- return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ",
- c->pending, c->high, c->maximum);
- }
-
- if (!left--) {
- amb_txq * t = &dev->txq;
- return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n",
- t->pending, t->maximum, t->high, t->filled);
- }
-
- if (!left--) {
- unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:");
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- amb_rxq * r = &dev->rxq[pool];
- count += sprintf (page+count, " %u/%u/%u %u %u",
- r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied);
- }
- count += sprintf (page+count, ".\n");
- return count;
- }
-
- if (!left--) {
- unsigned int count = sprintf (page, "RX buffer sizes:");
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- amb_rxq * r = &dev->rxq[pool];
- count += sprintf (page+count, " %u", r->buffer_size);
- }
- count += sprintf (page+count, ".\n");
- return count;
- }
-
-#if 0
- if (!left--) {
- // suni block etc?
- }
-#endif
-
- return 0;
-}
-
-/********** Operation Structure **********/
-
-static const struct atmdev_ops amb_ops = {
- .open = amb_open,
- .close = amb_close,
- .send = amb_send,
- .proc_read = amb_proc_read,
- .owner = THIS_MODULE,
-};
-
-/********** housekeeping **********/
-static void do_housekeeping (struct timer_list *t) {
- amb_dev * dev = from_timer(dev, t, housekeeping);
-
- // could collect device-specific (not driver/atm-linux) stats here
-
- // last resort refill once every ten seconds
- fill_rx_pools (dev);
- mod_timer(&dev->housekeeping, jiffies + 10*HZ);
-
- return;
-}
-
-/********** creation of communication queues **********/
-
-static int create_queues(amb_dev *dev, unsigned int cmds, unsigned int txs,
- unsigned int *rxs, unsigned int *rx_buffer_sizes)
-{
- unsigned char pool;
- size_t total = 0;
- void * memory;
- void * limit;
-
- PRINTD (DBG_FLOW, "create_queues %p", dev);
-
- total += cmds * sizeof(command);
-
- total += txs * (sizeof(tx_in) + sizeof(tx_out));
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out));
-
- memory = kmalloc (total, GFP_KERNEL);
- if (!memory) {
- PRINTK (KERN_ERR, "could not allocate queues");
- return -ENOMEM;
- }
- if (check_area (memory, total)) {
- PRINTK (KERN_ERR, "queues allocated in nasty area");
- kfree (memory);
- return -ENOMEM;
- }
-
- limit = memory + total;
- PRINTD (DBG_INIT, "queues from %p to %p", memory, limit);
-
- PRINTD (DBG_CMD, "command queue at %p", memory);
-
- {
- command * cmd = memory;
- amb_cq * cq = &dev->cq;
-
- cq->pending = 0;
- cq->high = 0;
- cq->maximum = cmds - 1;
-
- cq->ptrs.start = cmd;
- cq->ptrs.in = cmd;
- cq->ptrs.out = cmd;
- cq->ptrs.limit = cmd + cmds;
-
- memory = cq->ptrs.limit;
- }
-
- PRINTD (DBG_TX, "TX queue pair at %p", memory);
-
- {
- tx_in * in = memory;
- tx_out * out;
- amb_txq * txq = &dev->txq;
-
- txq->pending = 0;
- txq->high = 0;
- txq->filled = 0;
- txq->maximum = txs - 1;
-
- txq->in.start = in;
- txq->in.ptr = in;
- txq->in.limit = in + txs;
-
- memory = txq->in.limit;
- out = memory;
-
- txq->out.start = out;
- txq->out.ptr = out;
- txq->out.limit = out + txs;
-
- memory = txq->out.limit;
- }
-
- PRINTD (DBG_RX, "RX queue pairs at %p", memory);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- rx_in * in = memory;
- rx_out * out;
- amb_rxq * rxq = &dev->rxq[pool];
-
- rxq->buffer_size = rx_buffer_sizes[pool];
- rxq->buffers_wanted = 0;
-
- rxq->pending = 0;
- rxq->low = rxs[pool] - 1;
- rxq->emptied = 0;
- rxq->maximum = rxs[pool] - 1;
-
- rxq->in.start = in;
- rxq->in.ptr = in;
- rxq->in.limit = in + rxs[pool];
-
- memory = rxq->in.limit;
- out = memory;
-
- rxq->out.start = out;
- rxq->out.ptr = out;
- rxq->out.limit = out + rxs[pool];
-
- memory = rxq->out.limit;
- }
-
- if (memory == limit) {
- return 0;
- } else {
- PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit);
- kfree (limit - total);
- return -ENOMEM;
- }
-
-}
-
-/********** destruction of communication queues **********/
-
-static void destroy_queues (amb_dev * dev) {
- // all queues assumed empty
- void * memory = dev->cq.ptrs.start;
- // includes txq.in, txq.out, rxq[].in and rxq[].out
-
- PRINTD (DBG_FLOW, "destroy_queues %p", dev);
-
- PRINTD (DBG_INIT, "freeing queues at %p", memory);
- kfree (memory);
-
- return;
-}
-
-/********** basic loader commands and error handling **********/
-// centisecond timeouts - guessing away here
-static unsigned int command_timeouts [] = {
- [host_memory_test] = 15,
- [read_adapter_memory] = 2,
- [write_adapter_memory] = 2,
- [adapter_start] = 50,
- [get_version_number] = 10,
- [interrupt_host] = 1,
- [flash_erase_sector] = 1,
- [adap_download_block] = 1,
- [adap_erase_flash] = 1,
- [adap_run_in_iram] = 1,
- [adap_end_download] = 1
-};
-
-
-static unsigned int command_successes [] = {
- [host_memory_test] = COMMAND_PASSED_TEST,
- [read_adapter_memory] = COMMAND_READ_DATA_OK,
- [write_adapter_memory] = COMMAND_WRITE_DATA_OK,
- [adapter_start] = COMMAND_COMPLETE,
- [get_version_number] = COMMAND_COMPLETE,
- [interrupt_host] = COMMAND_COMPLETE,
- [flash_erase_sector] = COMMAND_COMPLETE,
- [adap_download_block] = COMMAND_COMPLETE,
- [adap_erase_flash] = COMMAND_COMPLETE,
- [adap_run_in_iram] = COMMAND_COMPLETE,
- [adap_end_download] = COMMAND_COMPLETE
-};
-
-static int decode_loader_result (loader_command cmd, u32 result)
-{
- int res;
- const char *msg;
-
- if (result == command_successes[cmd])
- return 0;
-
- switch (result) {
- case BAD_COMMAND:
- res = -EINVAL;
- msg = "bad command";
- break;
- case COMMAND_IN_PROGRESS:
- res = -ETIMEDOUT;
- msg = "command in progress";
- break;
- case COMMAND_PASSED_TEST:
- res = 0;
- msg = "command passed test";
- break;
- case COMMAND_FAILED_TEST:
- res = -EIO;
- msg = "command failed test";
- break;
- case COMMAND_READ_DATA_OK:
- res = 0;
- msg = "command read data ok";
- break;
- case COMMAND_READ_BAD_ADDRESS:
- res = -EINVAL;
- msg = "command read bad address";
- break;
- case COMMAND_WRITE_DATA_OK:
- res = 0;
- msg = "command write data ok";
- break;
- case COMMAND_WRITE_BAD_ADDRESS:
- res = -EINVAL;
- msg = "command write bad address";
- break;
- case COMMAND_WRITE_FLASH_FAILURE:
- res = -EIO;
- msg = "command write flash failure";
- break;
- case COMMAND_COMPLETE:
- res = 0;
- msg = "command complete";
- break;
- case COMMAND_FLASH_ERASE_FAILURE:
- res = -EIO;
- msg = "command flash erase failure";
- break;
- case COMMAND_WRITE_BAD_DATA:
- res = -EINVAL;
- msg = "command write bad data";
- break;
- default:
- res = -EINVAL;
- msg = "unknown error";
- PRINTD (DBG_LOAD|DBG_ERR,
- "decode_loader_result got %d=%x !",
- result, result);
- break;
- }
-
- PRINTK (KERN_ERR, "%s", msg);
- return res;
-}
-
-static int do_loader_command(volatile loader_block *lb, const amb_dev *dev,
- loader_command cmd)
-{
-
- unsigned long timeout;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command");
-
- /* do a command
-
- Set the return value to zero, set the command type and set the
- valid entry to the right magic value. The payload is already
- correctly byte-ordered so we leave it alone. Hit the doorbell
- with the bus address of this structure.
-
- */
-
- lb->result = 0;
- lb->command = cpu_to_be32 (cmd);
- lb->valid = cpu_to_be32 (DMA_VALID);
- // dump_registers (dev);
- // dump_loader_block (lb);
- wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask);
-
- timeout = command_timeouts[cmd] * 10;
-
- while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd);
- dump_registers (dev);
- dump_loader_block (lb);
- return -ETIMEDOUT;
- }
-
- if (cmd == adapter_start) {
- // wait for start command to acknowledge...
- timeout = 100;
- while (rd_plain (dev, offsetof(amb_mem, doorbell)))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x",
- be32_to_cpu (lb->result));
- dump_registers (dev);
- return -ETIMEDOUT;
- }
- return 0;
- } else {
- return decode_loader_result (cmd, be32_to_cpu (lb->result));
- }
-
-}
-
-/* loader: determine loader version */
-
-static int get_loader_version(loader_block *lb, const amb_dev *dev,
- u32 *version)
-{
- int res;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");
-
- res = do_loader_command (lb, dev, get_version_number);
- if (res)
- return res;
- if (version)
- *version = be32_to_cpu (lb->payload.version);
- return 0;
-}
-
-/* loader: write memory data blocks */
-
-static int loader_write(loader_block *lb, const amb_dev *dev,
- const struct ihex_binrec *rec)
-{
- transfer_block * tb = &lb->payload.transfer;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");
-
- tb->address = rec->addr;
- tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
- memcpy(tb->data, rec->data, be16_to_cpu(rec->len));
- return do_loader_command (lb, dev, write_adapter_memory);
-}
-
-/* loader: verify memory data blocks */
-
-static int loader_verify(loader_block *lb, const amb_dev *dev,
- const struct ihex_binrec *rec)
-{
- transfer_block * tb = &lb->payload.transfer;
- int res;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify");
-
- tb->address = rec->addr;
- tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
- res = do_loader_command (lb, dev, read_adapter_memory);
- if (!res && memcmp(tb->data, rec->data, be16_to_cpu(rec->len)))
- res = -EINVAL;
- return res;
-}
-
-/* loader: start microcode */
-
-static int loader_start(loader_block *lb, const amb_dev *dev, u32 address)
-{
- PRINTD (DBG_FLOW|DBG_LOAD, "loader_start");
-
- lb->payload.start = cpu_to_be32 (address);
- return do_loader_command (lb, dev, adapter_start);
-}
-
-/********** reset card **********/
-
-static inline void sf (const char * msg)
-{
- PRINTK (KERN_ERR, "self-test failed: %s", msg);
-}
-
-static int amb_reset (amb_dev * dev, int diags) {
- u32 word;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset");
-
- word = rd_plain (dev, offsetof(amb_mem, reset_control));
- // put card into reset state
- wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS);
- // wait a short while
- udelay (10);
-#if 1
- // put card into known good state
- wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS);
- // clear all interrupts just in case
- wr_plain (dev, offsetof(amb_mem, interrupt), -1);
-#endif
- // clear self-test done flag
- wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0);
- // take card out of reset state
- wr_plain (dev, offsetof(amb_mem, reset_control), word &~ AMB_RESET_BITS);
-
- if (diags) {
- unsigned long timeout;
- // 4.2 second wait
- msleep(4200);
- // half second time-out
- timeout = 500;
- while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready)))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_LOAD|DBG_ERR, "reset timed out");
- return -ETIMEDOUT;
- }
-
- // get results of self-test
- // XXX double check byte-order
- word = rd_mem (dev, offsetof(amb_mem, mb.loader.result));
- if (word & SELF_TEST_FAILURE) {
- if (word & GPINT_TST_FAILURE)
- sf ("interrupt");
- if (word & SUNI_DATA_PATTERN_FAILURE)
- sf ("SUNI data pattern");
- if (word & SUNI_DATA_BITS_FAILURE)
- sf ("SUNI data bits");
- if (word & SUNI_UTOPIA_FAILURE)
- sf ("SUNI UTOPIA interface");
- if (word & SUNI_FIFO_FAILURE)
- sf ("SUNI cell buffer FIFO");
- if (word & SRAM_FAILURE)
- sf ("bad SRAM");
- // better return value?
- return -EIO;
- }
-
- }
- return 0;
-}
-
-/********** transfer and start the microcode **********/
-
-static int ucode_init(loader_block *lb, amb_dev *dev)
-{
- const struct firmware *fw;
- unsigned long start_address;
- const struct ihex_binrec *rec;
- const char *errmsg = NULL;
- int res;
-
- res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
- if (res) {
- PRINTK (KERN_ERR, "Cannot load microcode data");
- return res;
- }
-
- /* First record contains just the start address */
- rec = (const struct ihex_binrec *)fw->data;
- if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
- errmsg = "no start record";
- goto fail;
- }
- start_address = be32_to_cpup((__be32 *)rec->data);
-
- rec = ihex_next_binrec(rec);
-
- PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init");
-
- while (rec) {
- PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
- be16_to_cpu(rec->len));
- if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
- errmsg = "record too long";
- goto fail;
- }
- if (be16_to_cpu(rec->len) & 3) {
- errmsg = "odd number of bytes";
- goto fail;
- }
- res = loader_write(lb, dev, rec);
- if (res)
- break;
-
- res = loader_verify(lb, dev, rec);
- if (res)
- break;
- rec = ihex_next_binrec(rec);
- }
- release_firmware(fw);
- if (!res)
- res = loader_start(lb, dev, start_address);
-
- return res;
-fail:
- release_firmware(fw);
- PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
- return -EINVAL;
-}
-
-/********** give adapter parameters **********/
-
-static inline __be32 bus_addr(void * addr) {
- return cpu_to_be32 (virt_to_bus (addr));
-}
-
-static int amb_talk(amb_dev *dev)
-{
- adap_talk_block a;
- unsigned char pool;
- unsigned long timeout;
-
- PRINTD (DBG_FLOW, "amb_talk %p", dev);
-
- a.command_start = bus_addr (dev->cq.ptrs.start);
- a.command_end = bus_addr (dev->cq.ptrs.limit);
- a.tx_start = bus_addr (dev->txq.in.start);
- a.tx_end = bus_addr (dev->txq.in.limit);
- a.txcom_start = bus_addr (dev->txq.out.start);
- a.txcom_end = bus_addr (dev->txq.out.limit);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- // the other "a" items are set up by the adapter
- a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start);
- a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit);
- a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start);
- a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit);
- a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size);
- }
-
-#ifdef AMB_NEW_MICROCODE
- // disable fast PLX prefetching
- a.init_flags = 0;
-#endif
-
- // pass the structure
- wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a));
-
- // 2.2 second wait (must not touch doorbell during 2 second DMA test)
- msleep(2200);
- // give the adapter another half second?
- timeout = 500;
- while (rd_plain (dev, offsetof(amb_mem, doorbell)))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-// get microcode version
-static void amb_ucode_version(amb_dev *dev)
-{
- u32 major;
- u32 minor;
- command cmd;
- cmd.request = cpu_to_be32 (SRB_GET_VERSION);
- while (command_do (dev, &cmd)) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
- major = be32_to_cpu (cmd.args.version.major);
- minor = be32_to_cpu (cmd.args.version.minor);
- PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor);
-}
-
-// get end station address
-static void amb_esi(amb_dev *dev, u8 *esi)
-{
- u32 lower4;
- u16 upper2;
- command cmd;
-
- cmd.request = cpu_to_be32 (SRB_GET_BIA);
- while (command_do (dev, &cmd)) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
- lower4 = be32_to_cpu (cmd.args.bia.lower4);
- upper2 = be32_to_cpu (cmd.args.bia.upper2);
- PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2 %04x", lower4, upper2);
-
- if (esi) {
- unsigned int i;
-
- PRINTDB (DBG_INIT, "ESI:");
- for (i = 0; i < ESI_LEN; ++i) {
- if (i < 4)
- esi[i] = bitrev8(lower4>>(8*i));
- else
- esi[i] = bitrev8(upper2>>(8*(i-4)));
- PRINTDM (DBG_INIT, " %02x", esi[i]);
- }
-
- PRINTDE (DBG_INIT, "");
- }
-
- return;
-}
-
-static void fixup_plx_window (amb_dev *dev, loader_block *lb)
-{
- // fix up the PLX-mapped window base address to match the block
- unsigned long blb;
- u32 mapreg;
- blb = virt_to_bus(lb);
- // the kernel stack had better not ever cross a 1Gb boundary!
- mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10]));
- mapreg &= ~onegigmask;
- mapreg |= blb & onegigmask;
- wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg);
- return;
-}
-
-static int amb_init(amb_dev *dev)
-{
- loader_block lb;
-
- u32 version;
-
- if (amb_reset (dev, 1)) {
- PRINTK (KERN_ERR, "card reset failed!");
- } else {
- fixup_plx_window (dev, &lb);
-
- if (get_loader_version (&lb, dev, &version)) {
- PRINTK (KERN_INFO, "failed to get loader version");
- } else {
- PRINTK (KERN_INFO, "loader version is %08x", version);
-
- if (ucode_init (&lb, dev)) {
- PRINTK (KERN_ERR, "microcode failure");
- } else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) {
- PRINTK (KERN_ERR, "failed to get memory for queues");
- } else {
-
- if (amb_talk (dev)) {
- PRINTK (KERN_ERR, "adapter did not accept queues");
- } else {
-
- amb_ucode_version (dev);
- return 0;
-
- } /* amb_talk */
-
- destroy_queues (dev);
- } /* create_queues, ucode_init */
-
- amb_reset (dev, 0);
- } /* get_loader_version */
-
- } /* amb_reset */
-
- return -EINVAL;
-}
-
-static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
-{
- unsigned char pool;
-
- // set up known dev items straight away
- dev->pci_dev = pci_dev;
- pci_set_drvdata(pci_dev, dev);
-
- dev->iobase = pci_resource_start (pci_dev, 1);
- dev->irq = pci_dev->irq;
- dev->membase = bus_to_virt(pci_resource_start(pci_dev, 0));
-
- // flags (currently only dead)
- dev->flags = 0;
-
- // Allocate cell rates (fibre)
-  // ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
- // to be really pedantic, this should be ATM_OC3c_PCR
- dev->tx_avail = ATM_OC3_PCR;
- dev->rx_avail = ATM_OC3_PCR;
-
- // semaphore for txer/rxer modifications - we cannot use a
- // spinlock as the critical region needs to switch processes
- mutex_init(&dev->vcc_sf);
- // queue manipulation spinlocks; we want atomic reads and
- // writes to the queue descriptors (handles IRQ and SMP)
- // consider replacing "int pending" -> "atomic_t available"
- // => problem related to who gets to move queue pointers
- spin_lock_init (&dev->cq.lock);
- spin_lock_init (&dev->txq.lock);
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- spin_lock_init (&dev->rxq[pool].lock);
-}
-
-static void setup_pci_dev(struct pci_dev *pci_dev)
-{
- unsigned char lat;
-
- // enable bus master accesses
- pci_set_master(pci_dev);
-
- // frobnicate latency (upwards, usually)
- pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat);
-
- if (!pci_lat)
- pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat;
-
- if (lat != pci_lat) {
- PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu",
- lat, pci_lat);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
- }
-}
-
-static int amb_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_ent)
-{
- amb_dev * dev;
- int err;
- unsigned int irq;
-
- err = pci_enable_device(pci_dev);
- if (err < 0) {
- PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card");
- goto out;
- }
-
- // read resources from PCI configuration space
- irq = pci_dev->irq;
-
- if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) {
- PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card");
- err = -EINVAL;
- goto out_disable;
- }
-
- PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at"
- " IO %llx, IRQ %u, MEM %p",
- (unsigned long long)pci_resource_start(pci_dev, 1),
- irq, bus_to_virt(pci_resource_start(pci_dev, 0)));
-
- // check IO region
- err = pci_request_region(pci_dev, 1, DEV_LABEL);
- if (err < 0) {
- PRINTK (KERN_ERR, "IO range already in use!");
- goto out_disable;
- }
-
- dev = kzalloc(sizeof(amb_dev), GFP_KERNEL);
- if (!dev) {
- PRINTK (KERN_ERR, "out of memory!");
- err = -ENOMEM;
- goto out_release;
- }
-
- setup_dev(dev, pci_dev);
-
- err = amb_init(dev);
- if (err < 0) {
- PRINTK (KERN_ERR, "adapter initialisation failure");
- goto out_free;
- }
-
- setup_pci_dev(pci_dev);
-
- // grab (but share) IRQ and install handler
- err = request_irq(irq, interrupt_handler, IRQF_SHARED, DEV_LABEL, dev);
- if (err < 0) {
- PRINTK (KERN_ERR, "request IRQ failed!");
- goto out_reset;
- }
-
- dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1,
- NULL);
- if (!dev->atm_dev) {
- PRINTD (DBG_ERR, "failed to register Madge ATM adapter");
- err = -EINVAL;
- goto out_free_irq;
- }
-
- PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
- dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
-
- // register our address
- amb_esi (dev, dev->atm_dev->esi);
-
- // 0 bits for vpi, 10 bits for vci
- dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
- dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
-
- timer_setup(&dev->housekeeping, do_housekeeping, 0);
- mod_timer(&dev->housekeeping, jiffies);
-
- // enable host interrupts
- interrupts_on (dev);
-
-out:
- return err;
-
-out_free_irq:
- free_irq(irq, dev);
-out_reset:
- amb_reset(dev, 0);
-out_free:
- kfree(dev);
-out_release:
- pci_release_region(pci_dev, 1);
-out_disable:
- pci_disable_device(pci_dev);
- goto out;
-}
-
-
-static void amb_remove_one(struct pci_dev *pci_dev)
-{
- struct amb_dev *dev;
-
- dev = pci_get_drvdata(pci_dev);
-
- PRINTD(DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
- del_timer_sync(&dev->housekeeping);
- // the drain should not be necessary
- drain_rx_pools(dev);
- interrupts_off(dev);
- amb_reset(dev, 0);
- free_irq(dev->irq, dev);
- pci_disable_device(pci_dev);
- destroy_queues(dev);
- atm_dev_deregister(dev->atm_dev);
- kfree(dev);
- pci_release_region(pci_dev, 1);
-}
-
-static void __init amb_check_args (void) {
- unsigned char pool;
- unsigned int max_rx_size;
-
-#ifdef DEBUG_AMBASSADOR
- PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
-#else
- if (debug)
- PRINTK (KERN_NOTICE, "no debugging support");
-#endif
-
- if (cmds < MIN_QUEUE_SIZE)
- PRINTK (KERN_NOTICE, "cmds has been raised to %u",
- cmds = MIN_QUEUE_SIZE);
-
- if (txs < MIN_QUEUE_SIZE)
- PRINTK (KERN_NOTICE, "txs has been raised to %u",
- txs = MIN_QUEUE_SIZE);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- if (rxs[pool] < MIN_QUEUE_SIZE)
- PRINTK (KERN_NOTICE, "rxs[%hu] has been raised to %u",
- pool, rxs[pool] = MIN_QUEUE_SIZE);
-
- // buffers sizes should be greater than zero and strictly increasing
- max_rx_size = 0;
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- if (rxs_bs[pool] <= max_rx_size)
- PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)",
- pool, rxs_bs[pool]);
- else
- max_rx_size = rxs_bs[pool];
-
- if (rx_lats < MIN_RX_BUFFERS)
- PRINTK (KERN_NOTICE, "rx_lats has been raised to %u",
- rx_lats = MIN_RX_BUFFERS);
-
- return;
-}
-
-/********** module stuff **********/
-
-MODULE_AUTHOR(maintainer_string);
-MODULE_DESCRIPTION(description_string);
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("atmsar11.fw");
-module_param(debug, ushort, 0644);
-module_param(cmds, uint, 0);
-module_param(txs, uint, 0);
-module_param_array(rxs, uint, NULL, 0);
-module_param_array(rxs_bs, uint, NULL, 0);
-module_param(rx_lats, uint, 0);
-module_param(pci_lat, byte, 0);
-MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
-MODULE_PARM_DESC(cmds, "number of command queue entries");
-MODULE_PARM_DESC(txs, "number of TX queue entries");
-MODULE_PARM_DESC(rxs, "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]");
-MODULE_PARM_DESC(rxs_bs, "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]");
-MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies");
-MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-
-/********** module entry **********/
-
-static const struct pci_device_id amb_pci_tbl[] = {
- { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 },
- { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, amb_pci_tbl);
-
-static struct pci_driver amb_driver = {
- .name = "amb",
- .probe = amb_probe,
- .remove = amb_remove_one,
- .id_table = amb_pci_tbl,
-};
-
-static int __init amb_module_init (void)
-{
- PRINTD (DBG_FLOW|DBG_INIT, "init_module");
-
- BUILD_BUG_ON(sizeof(amb_mem) != 4*16 + 4*12);
-
- show_version();
-
- amb_check_args();
-
- // get the juice
- return pci_register_driver(&amb_driver);
-}
-
-/********** module exit **********/
-
-static void __exit amb_module_exit (void)
-{
- PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module");
-
- pci_unregister_driver(&amb_driver);
-}
-
-module_init(amb_module_init);
-module_exit(amb_module_exit);
diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h
deleted file mode 100644
index 086ceb8568dc..000000000000
--- a/drivers/atm/ambassador.h
+++ /dev/null
@@ -1,648 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- Madge Ambassador ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-#ifndef AMBASSADOR_H
-#define AMBASSADOR_H
-
-
-#ifdef CONFIG_ATM_AMBASSADOR_DEBUG
-#define DEBUG_AMBASSADOR
-#endif
-
-#define DEV_LABEL "amb"
-
-#ifndef PCI_VENDOR_ID_MADGE
-#define PCI_VENDOR_ID_MADGE 0x10B6
-#endif
-#ifndef PCI_DEVICE_ID_MADGE_AMBASSADOR
-#define PCI_DEVICE_ID_MADGE_AMBASSADOR 0x1001
-#endif
-#ifndef PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD
-#define PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD 0x1002
-#endif
-
-// diagnostic output
-
-#define PRINTK(severity,format,args...) \
- printk(severity DEV_LABEL ": " format "\n" , ## args)
-
-#ifdef DEBUG_AMBASSADOR
-
-#define DBG_ERR 0x0001
-#define DBG_WARN 0x0002
-#define DBG_INFO 0x0004
-#define DBG_INIT 0x0008
-#define DBG_LOAD 0x0010
-#define DBG_VCC 0x0020
-#define DBG_QOS 0x0040
-#define DBG_CMD 0x0080
-#define DBG_TX 0x0100
-#define DBG_RX 0x0200
-#define DBG_SKB 0x0400
-#define DBG_POOL 0x0800
-#define DBG_IRQ 0x1000
-#define DBG_FLOW 0x2000
-#define DBG_REGS 0x4000
-#define DBG_DATA 0x8000
-#define DBG_MASK 0xffff
-
-/* the ## prevents the annoying double expansion of the macro arguments */
-/* KERN_INFO is used since KERN_DEBUG often does not make it to the console */
-#define PRINTDB(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format , ## args) : 1 )
-#define PRINTDM(bits,format,args...) \
- ( (debug & (bits)) ? printk (format , ## args) : 1 )
-#define PRINTDE(bits,format,args...) \
- ( (debug & (bits)) ? printk (format "\n" , ## args) : 1 )
-#define PRINTD(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format "\n" , ## args) : 1 )
-
-#else
-
-#define PRINTD(bits,format,args...)
-#define PRINTDB(bits,format,args...)
-#define PRINTDM(bits,format,args...)
-#define PRINTDE(bits,format,args...)
-
-#endif
-
-#define PRINTDD(bits,format,args...)
-#define PRINTDDB(sec,fmt,args...)
-#define PRINTDDM(sec,fmt,args...)
-#define PRINTDDE(sec,fmt,args...)
-
-// tunable values (?)
-
-/* MUST be powers of two -- why ? */
-#define COM_Q_ENTRIES 8
-#define TX_Q_ENTRIES 32
-#define RX_Q_ENTRIES 64
-
-// fixed values
-
-// guessing
-#define AMB_EXTENT 0x80
-
-// Minimum allowed size for an Ambassador queue
-#define MIN_QUEUE_SIZE 2
-
-// Ambassador microcode allows 1 to 4 pools, we use 4 (simpler)
-#define NUM_RX_POOLS 4
-
-// minimum RX buffers required to cope with replenishing delay
-#define MIN_RX_BUFFERS 1
-
-// minimum PCI latency we will tolerate (32 IS TOO SMALL)
-#define MIN_PCI_LATENCY 64 // 255
-
-// VCs supported by card (VPI always 0)
-#define NUM_VPI_BITS 0
-#define NUM_VCI_BITS 10
-#define NUM_VCS 1024
-
-/* The status field bits defined so far. */
-#define RX_ERR 0x8000 // always present if there is an error (hmm)
-#define CRC_ERR 0x4000 // AAL5 CRC error
-#define LEN_ERR 0x2000 // overlength frame
-#define ABORT_ERR 0x1000 // zero length field in received frame
-#define UNUSED_ERR 0x0800 // buffer returned unused
-
-// Adaptor commands
-
-#define SRB_OPEN_VC 0
-/* par_0: dwordswap(VC_number) */
-/* par_1: dwordswap(flags<<16) or wordswap(flags)*/
-/* flags: */
-
-/* LANE: 0x0004 */
-/* NOT_UBR: 0x0008 */
-/* ABR: 0x0010 */
-
-/* RxPool0: 0x0000 */
-/* RxPool1: 0x0020 */
-/* RxPool2: 0x0040 */
-/* RxPool3: 0x0060 */
-
-/* par_2: dwordswap(fp_rate<<16) or wordswap(fp_rate) */
-
-#define SRB_CLOSE_VC 1
-/* par_0: dwordswap(VC_number) */
-
-#define SRB_GET_BIA 2
-/* returns */
-/* par_0: dwordswap(half BIA) */
-/* par_1: dwordswap(half BIA) */
-
-#define SRB_GET_SUNI_STATS 3
-/* par_0: dwordswap(physical_host_address) */
-
-#define SRB_SET_BITS_8 4
-#define SRB_SET_BITS_16 5
-#define SRB_SET_BITS_32 6
-#define SRB_CLEAR_BITS_8 7
-#define SRB_CLEAR_BITS_16 8
-#define SRB_CLEAR_BITS_32 9
-/* par_0: dwordswap(ATMizer address) */
-/* par_1: dwordswap(mask) */
-
-#define SRB_SET_8 10
-#define SRB_SET_16 11
-#define SRB_SET_32 12
-/* par_0: dwordswap(ATMizer address) */
-/* par_1: dwordswap(data) */
-
-#define SRB_GET_32 13
-/* par_0: dwordswap(ATMizer address) */
-/* returns */
-/* par_1: dwordswap(ATMizer data) */
-
-#define SRB_GET_VERSION 14
-/* returns */
-/* par_0: dwordswap(Major Version) */
-/* par_1: dwordswap(Minor Version) */
-
-#define SRB_FLUSH_BUFFER_Q 15
-/* Only flags to define which buffer pool; all others must be zero */
-/* par_0: dwordswap(flags<<16) or wordswap(flags)*/
-
-#define SRB_GET_DMA_SPEEDS 16
-/* returns */
-/* par_0: dwordswap(Read speed (bytes/sec)) */
-/* par_1: dwordswap(Write speed (bytes/sec)) */
-
-#define SRB_MODIFY_VC_RATE 17
-/* par_0: dwordswap(VC_number) */
-/* par_1: dwordswap(fp_rate<<16) or wordswap(fp_rate) */
-
-#define SRB_MODIFY_VC_FLAGS 18
-/* par_0: dwordswap(VC_number) */
-/* par_1: dwordswap(flags<<16) or wordswap(flags)*/
-
-/* flags: */
-
-/* LANE: 0x0004 */
-/* NOT_UBR: 0x0008 */
-/* ABR: 0x0010 */
-
-/* RxPool0: 0x0000 */
-/* RxPool1: 0x0020 */
-/* RxPool2: 0x0040 */
-/* RxPool3: 0x0060 */
-
-#define SRB_RATE_SHIFT 16
-#define SRB_POOL_SHIFT (SRB_FLAGS_SHIFT+5)
-#define SRB_FLAGS_SHIFT 16
-
-#define SRB_STOP_TASKING 19
-#define SRB_START_TASKING 20
-#define SRB_SHUT_DOWN 21
-#define MAX_SRB 21
-
-#define SRB_COMPLETE 0xffffffff
-
-#define TX_FRAME 0x80000000
-
-// number of types of SRB MUST be a power of two -- why?
-#define NUM_OF_SRB 32
-
-// number of bits of period info for rate
-#define MAX_RATE_BITS 6
-
-#define TX_UBR 0x0000
-#define TX_UBR_CAPPED 0x0008
-#define TX_ABR 0x0018
-#define TX_FRAME_NOTCAP 0x0000
-#define TX_FRAME_CAPPED 0x8000
-
-#define FP_155_RATE 0x24b1
-#define FP_25_RATE 0x1f9d
-
-/* #define VERSION_NUMBER 0x01000000 // initial release */
-/* #define VERSION_NUMBER 0x01010000 // fixed startup probs PLX MB0 not cleared */
-/* #define VERSION_NUMBER 0x01020000 // changed SUNI reset timings; allowed r/w onchip */
-
-/* #define VERSION_NUMBER 0x01030000 // clear local doorbell int reg on reset */
-/* #define VERSION_NUMBER 0x01040000 // PLX bug work around version PLUS */
-/* remove race conditions on basic interface */
-/* indicate to the host that diagnostics */
-/* have finished; if failed, how and what */
-/* failed */
-/* fix host memory test to fix PLX bug */
-/* allow flash upgrade and BIA upgrade directly */
-/* */
-#define VERSION_NUMBER 0x01050025 /* Jason's first hacked version. */
-/* Change in download algorithm */
-
-#define DMA_VALID 0xb728e149 /* completely random */
-
-#define FLASH_BASE 0xa0c00000
-#define FLASH_SIZE 0x00020000 /* 128K */
-#define BIA_BASE (FLASH_BASE+0x0001c000) /* Flash Sector 7 */
-#define BIA_ADDRESS ((void *)0xa0c1c000)
-#define PLX_BASE 0xe0000000
-
-typedef enum {
- host_memory_test = 1,
- read_adapter_memory,
- write_adapter_memory,
- adapter_start,
- get_version_number,
- interrupt_host,
- flash_erase_sector,
- adap_download_block = 0x20,
- adap_erase_flash,
- adap_run_in_iram,
- adap_end_download
-} loader_command;
-
-#define BAD_COMMAND (-1)
-#define COMMAND_IN_PROGRESS 1
-#define COMMAND_PASSED_TEST 2
-#define COMMAND_FAILED_TEST 3
-#define COMMAND_READ_DATA_OK 4
-#define COMMAND_READ_BAD_ADDRESS 5
-#define COMMAND_WRITE_DATA_OK 6
-#define COMMAND_WRITE_BAD_ADDRESS 7
-#define COMMAND_WRITE_FLASH_FAILURE 8
-#define COMMAND_COMPLETE 9
-#define COMMAND_FLASH_ERASE_FAILURE 10
-#define COMMAND_WRITE_BAD_DATA 11
-
-/* bit fields for mailbox[0] return values */
-
-#define GPINT_TST_FAILURE 0x00000001
-#define SUNI_DATA_PATTERN_FAILURE 0x00000002
-#define SUNI_DATA_BITS_FAILURE 0x00000004
-#define SUNI_UTOPIA_FAILURE 0x00000008
-#define SUNI_FIFO_FAILURE 0x00000010
-#define SRAM_FAILURE 0x00000020
-#define SELF_TEST_FAILURE 0x0000003f
-
-/* mailbox[1] = 0 in progress, -1 on completion */
-/* mailbox[2] = current test 00 00 test(8 bit) phase(8 bit) */
-/* mailbox[3] = last failure, 00 00 test(8 bit) phase(8 bit) */
-/* mailbox[4],mailbox[5],mailbox[6] random failure values */
-
-/* PLX/etc. memory map including command structure */
-
-/* These registers may also be memory mapped in PCI memory */
-
-#define UNUSED_LOADER_MAILBOXES 6
-
-typedef struct {
- u32 stuff[16];
- union {
- struct {
- u32 result;
- u32 ready;
- u32 stuff[UNUSED_LOADER_MAILBOXES];
- } loader;
- struct {
- u32 cmd_address;
- u32 tx_address;
- u32 rx_address[NUM_RX_POOLS];
- u32 gen_counter;
- u32 spare;
- } adapter;
- } mb;
- u32 doorbell;
- u32 interrupt;
- u32 interrupt_control;
- u32 reset_control;
-} amb_mem;
-
-/* RESET bit, IRQ (card to host) and doorbell (host to card) enable bits */
-#define AMB_RESET_BITS 0x40000000
-#define AMB_INTERRUPT_BITS 0x00000300
-#define AMB_DOORBELL_BITS 0x00030000
-
-/* loader commands */
-
-#define MAX_COMMAND_DATA 13
-#define MAX_TRANSFER_DATA 11
-
-typedef struct {
- __be32 address;
- __be32 count;
- __be32 data[MAX_TRANSFER_DATA];
-} transfer_block;
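/*
 * Editor's note, not part of the original header: with MAX_TRANSFER_DATA
 * set to 11 words, one transfer_block carries at most 11 * 4 = 44 bytes of
 * microcode per loader command; ucode_init() in ambassador.c rejects ihex
 * records longer than 4 * MAX_TRANSFER_DATA for exactly this reason.
 */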
-
-typedef struct {
- __be32 result;
- __be32 command;
- union {
- transfer_block transfer;
- __be32 version;
- __be32 start;
- __be32 data[MAX_COMMAND_DATA];
- } payload;
- __be32 valid;
-} loader_block;
-
-/* command queue */
-
-/* Again all data are BIG ENDIAN */
-
-typedef struct {
- union {
- struct {
- __be32 vc;
- __be32 flags;
- __be32 rate;
- } open;
- struct {
- __be32 vc;
- __be32 rate;
- } modify_rate;
- struct {
- __be32 vc;
- __be32 flags;
- } modify_flags;
- struct {
- __be32 vc;
- } close;
- struct {
- __be32 lower4;
- __be32 upper2;
- } bia;
- struct {
- __be32 address;
- } suni;
- struct {
- __be32 major;
- __be32 minor;
- } version;
- struct {
- __be32 read;
- __be32 write;
- } speed;
- struct {
- __be32 flags;
- } flush;
- struct {
- __be32 address;
- __be32 data;
- } memory;
- __be32 par[3];
- } args;
- __be32 request;
-} command;
-
-/* transmit queues and associated structures */
-
-/* The hosts transmit structure. All BIG ENDIAN; host address
- restricted to first 1GByte, but address passed to the card must
- have the top MS bit or'ed in. -- check this */
-
-/* TX is described by 1+ tx_frags followed by a tx_frag_end */
-
-typedef struct {
- __be32 bytes;
- __be32 address;
-} tx_frag;
-
-/* apart from handle the fields here are for the adapter to play with
- and should be set to zero */
-
-typedef struct {
- u32 handle;
- u16 vc;
- u16 next_descriptor_length;
- u32 next_descriptor;
-#ifdef AMB_NEW_MICROCODE
- u8 cpcs_uu;
- u8 cpi;
- u16 pad;
-#endif
-} tx_frag_end;
-
-typedef struct {
- tx_frag tx_frag;
- tx_frag_end tx_frag_end;
- struct sk_buff * skb;
-} tx_simple;
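/*
 * Editor's sketch, not part of the original header: the "1+ tx_frags
 * followed by a tx_frag_end" layout above for a frame scattered over two
 * buffers.  The driver itself only ever builds the single-fragment
 * tx_simple in amb_send(); the name example_tx_two_frag is invented.
 */
typedef struct {
	tx_frag frags[2];	/* two {bytes, bus address} scatter entries */
	tx_frag_end end;	/* trailer carrying the completion handle */
	struct sk_buff *skb;
} example_tx_two_frag;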
-
-#if 0
-typedef union {
- tx_frag fragment;
- tx_frag_end end_of_list;
-} tx_descr;
-#endif
-
-/* this "points" to the sequence of fragments and trailer */
-
-typedef struct {
- __be16 vc;
- __be16 tx_descr_length;
- __be32 tx_descr_addr;
-} tx_in;
-
-/* handle is the handle from tx_in */
-
-typedef struct {
- u32 handle;
-} tx_out;
-
-/* receive frame structure */
-
-/* All BIG ENDIAN; handle is as passed from host; length is zero for
-   aborted frames and frames with errors. Header is actually the VC
-   number; lec-id is NOT yet supported. */
-
-typedef struct {
- u32 handle;
- __be16 vc;
- __be16 lec_id; // unused
- __be16 status;
- __be16 length;
-} rx_out;
-
-/* buffer supply structure */
-
-typedef struct {
- u32 handle;
- __be32 host_address;
-} rx_in;
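/* Editorial sketch, not part of the original header: supplying one receive
 * buffer to the adapter.  Only host_address is read by the card (big
 * endian); handle is host-private and is returned verbatim in
 * rx_out.handle when the buffer completes.
 */
static void example_fill_rx_in(rx_in *entry, u32 handle, u32 buffer_bus_addr)
{
  entry->handle = handle;
  entry->host_address = cpu_to_be32(buffer_bus_addr);
}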
-
-/* This first structure is the area in host memory where the adapter
- writes its pointer values. These pointer values are BIG ENDIAN and
- reside in the same 4MB 'page' as this structure. The host gives the
- adapter the address of this block by sending a doorbell interrupt
- to the adapter after downloading the code and setting it going. The
- addresses have the top 10 bits set to 1010000010b -- really?
-
- The host must initialise these before handing the block to the
- adapter. */
-
-typedef struct {
- __be32 command_start; /* SRB commands completions */
- __be32 command_end; /* SRB commands completions */
- __be32 tx_start;
- __be32 tx_end;
- __be32 txcom_start; /* tx completions */
- __be32 txcom_end; /* tx completions */
- struct {
- __be32 buffer_start;
- __be32 buffer_end;
- u32 buffer_q_get;
- u32 buffer_q_end;
- u32 buffer_aptr;
- __be32 rx_start; /* rx completions */
- __be32 rx_end;
- u32 rx_ptr;
- __be32 buffer_size; /* size of host buffer */
- } rec_struct[NUM_RX_POOLS];
-#ifdef AMB_NEW_MICROCODE
- u16 init_flags;
- u16 talk_block_spare;
-#endif
-} adap_talk_block;
-
-/* This structure must be kept in line with the vcr image in sarmain.h
-
-   This is the structure in host memory that the adapter fills in
-   when handling GET_SUNI_STATS */
-
-typedef struct {
- u8 racp_chcs;
- u8 racp_uhcs;
- u16 spare;
- u32 racp_rcell;
- u32 tacp_tcell;
- u32 flags;
- u32 dropped_cells;
- u32 dropped_frames;
-} suni_stats;
-
-typedef enum {
- dead
-} amb_flags;
-
-#define NEXTQ(current,start,limit) \
- ( (current)+1 < (limit) ? (current)+1 : (start) )
-
-typedef struct {
- command * start;
- command * in;
- command * out;
- command * limit;
-} amb_cq_ptrs;
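/* Editorial sketch, not part of the original header: NEXTQ implements the
 * circular advance used for the command queue, wrapping from limit-1 back
 * to start.
 */
static inline void example_cq_advance_in(amb_cq_ptrs *ptrs)
{
  ptrs->in = NEXTQ(ptrs->in, ptrs->start, ptrs->limit);
}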
-
-typedef struct {
- spinlock_t lock;
- unsigned int pending;
- unsigned int high;
- unsigned int filled;
- unsigned int maximum; // size - 1 (q implementation)
- amb_cq_ptrs ptrs;
-} amb_cq;
-
-typedef struct {
- spinlock_t lock;
- unsigned int pending;
- unsigned int high;
- unsigned int filled;
- unsigned int maximum; // size - 1 (q implementation)
- struct {
- tx_in * start;
- tx_in * ptr;
- tx_in * limit;
- } in;
- struct {
- tx_out * start;
- tx_out * ptr;
- tx_out * limit;
- } out;
-} amb_txq;
-
-typedef struct {
- spinlock_t lock;
- unsigned int pending;
- unsigned int low;
- unsigned int emptied;
- unsigned int maximum; // size - 1 (q implementation)
- struct {
- rx_in * start;
- rx_in * ptr;
- rx_in * limit;
- } in;
- struct {
- rx_out * start;
- rx_out * ptr;
- rx_out * limit;
- } out;
- unsigned int buffers_wanted;
- unsigned int buffer_size;
-} amb_rxq;
-
-typedef struct {
- unsigned long tx_ok;
- struct {
- unsigned long ok;
- unsigned long error;
- unsigned long badcrc;
- unsigned long toolong;
- unsigned long aborted;
- unsigned long unused;
- } rx;
-} amb_stats;
-
-// a single struct pointed to by atm_vcc->dev_data
-
-typedef struct {
- u8 tx_vc_bits:7;
- u8 tx_present:1;
-} amb_tx_info;
-
-typedef struct {
- unsigned char pool;
-} amb_rx_info;
-
-typedef struct {
- amb_rx_info rx_info;
- u16 tx_frame_bits;
- unsigned int tx_rate;
- unsigned int rx_rate;
-} amb_vcc;
-
-struct amb_dev {
- u8 irq;
- unsigned long flags;
- u32 iobase;
- u32 * membase;
-
- amb_cq cq;
- amb_txq txq;
- amb_rxq rxq[NUM_RX_POOLS];
-
- struct mutex vcc_sf;
- amb_tx_info txer[NUM_VCS];
- struct atm_vcc * rxer[NUM_VCS];
- unsigned int tx_avail;
- unsigned int rx_avail;
-
- amb_stats stats;
-
- struct atm_dev * atm_dev;
- struct pci_dev * pci_dev;
- struct timer_list housekeeping;
-};
-
-typedef struct amb_dev amb_dev;
-
-#define AMB_DEV(atm_dev) ((amb_dev *) (atm_dev)->dev_data)
-#define AMB_VCC(atm_vcc) ((amb_vcc *) (atm_vcc)->dev_data)
-
-/* rate rounding */
-
-typedef enum {
- round_up,
- round_down,
- round_nearest
-} rounding;
-
-#endif
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
deleted file mode 100644
index 4f67404fe64c..000000000000
--- a/drivers/atm/firestream.c
+++ /dev/null
@@ -1,2057 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-/* drivers/atm/firestream.c - FireStream 155 (MB86697) and
- * FireStream 50 (MB86695) device driver
- */
-
-/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl
- * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA
- * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd
- */
-
-/*
-*/
-
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/poison.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/ioport.h> /* for request_region */
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/wait.h>
-
-#include "firestream.h"
-
-static int loopback = 0;
-static int num=0x5a;
-
-/* According to measurements (but they look suspicious to me!) done in
- * '97, 37% of the packets are one cell in size. So it pays to have
- * buffers allocated at that size. A large jump in percentage of
- * packets occurs at packets around 536 bytes in length. So it also
- * pays to have those pre-allocated. Unfortunately, we can't fully
- * take advantage of this, as the majority of the packets are likely to
- * be TCP/IP (which is obviously where the measurements come from). There the
- * link would be opened with, say, a 1500 byte MTU, and we can't handle
- * smaller buffers more efficiently than the larger ones. -- REW
- */
-
-/* Due to the way Linux memory management works, specifying "576" as
- * an allocation size here isn't going to help. They are allocated
- * from 1024-byte regions anyway. With the size of the sk_buffs (quite
- * large), it doesn't pay to allocate the smallest size (64) -- REW */
-
-/* This is all guesswork. Hard numbers to back this up or disprove it
- * are appreciated. -- REW */
-
-/* The last entry should be about 64k. However, the "buffer size" is
- * passed to the chip in a 16 bit field. I don't know how "65536"
- * would be interpreted. -- REW */
-
-#define NP FS_NR_FREE_POOLS
-static int rx_buf_sizes[NP] = {128, 256, 512, 1024, 2048, 4096, 16384, 65520};
-/* log2: 7 8 9 10 11 12 14 16 */
-
-#if 0
-static int rx_pool_sizes[NP] = {1024, 1024, 512, 256, 128, 64, 32, 32};
-#else
-/* debug */
-static int rx_pool_sizes[NP] = {128, 128, 128, 64, 64, 64, 32, 32};
-#endif
-/* log2: 10 10 9 8 7 6 5 5 */
-/* sumlog2: 17 18 18 18 18 18 19 21 */
-/* mem allocated: 128k 256k 256k 256k 256k 256k 512k 2M */
-/* tot mem: almost 4M */
-
-/* NP is shorter, so that it fits on a single line. */
-#undef NP
-
-
-/* Small hardware gotcha:
-
- The FS50 CAM (VP/VC match registers) always take the lowest channel
- number that matches. This is not a problem.
-
- However, they also ignore whether the channel is enabled or
- not. This means that if you allocate channel 0 to 1.2 and then
-   channel 1 to 0.0, then disabling channel 0 and writing 0 to the
- match channel for channel 0 will "steal" the traffic from channel
- 1, even if you correctly disable channel 0.
-
- Workaround:
-
- - When disabling channels, write an invalid VP/VC value to the
- match register. (We use 0xffffffff, which in the worst case
- matches VP/VC = <maxVP>/<maxVC>, but I expect it not to match
- anything as some "when not in use, program to 0" bits are now
- programmed to 1...)
-
- - Don't initialize the match registers to 0, as 0.0 is a valid
- channel.
-*/
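/* Editorial sketch, not part of the original driver: the workaround
 * described above, as this file applies it elsewhere.  On close (or at
 * reset) the channel's VP/VC match register is written with an all-ones,
 * invalid value rather than 0, because 0.0 is a valid channel.  The
 * 0x200 + 4*channel offset is the one reset_chip() further down uses for
 * the FS50 match registers; write_fs() and struct fs_dev are also defined
 * further down.
 */
static void example_invalidate_match(struct fs_dev *dev, int channo)
{
  write_fs(dev, 0x200 + channo * 4, 0xffffffff);
}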
-
-
-/* Optimization hints and tips.
-
- The FireStream chips are very capable of reducing the amount of
- "interrupt-traffic" for the CPU. This driver requests an interrupt on EVERY
- action. You could try to minimize this a bit.
-
- Besides that, the userspace->kernel copy and the PCI bus are the
- performance limiting issues for this driver.
-
- You could queue up a bunch of outgoing packets without telling the
- FireStream. I'm not sure that's going to win you much though. The
- Linux layer won't tell us in advance when it's not going to give us
- any more packets in a while. So this is tricky to implement right without
- introducing extra delays.
-
- -- REW
- */
-
-
-
-
-/* The strings that define what the RX queue entry is all about. */
-/* Fujitsu: Please tell me which ones can have a pointer to a
- freepool descriptor! */
-static char *res_strings[] = {
- "RX OK: streaming not EOP",
- "RX OK: streaming EOP",
- "RX OK: Single buffer packet",
- "RX OK: packet mode",
- "RX OK: F4 OAM (end to end)",
- "RX OK: F4 OAM (Segment)",
- "RX OK: F5 OAM (end to end)",
- "RX OK: F5 OAM (Segment)",
- "RX OK: RM cell",
- "RX OK: TRANSP cell",
- "RX OK: TRANSPC cell",
- "Unmatched cell",
- "reserved 12",
- "reserved 13",
- "reserved 14",
- "Unrecognized cell",
- "reserved 16",
- "reassembly abort: AAL5 abort",
- "packet purged",
- "packet ageing timeout",
- "channel ageing timeout",
- "calculated length error",
- "programmed length limit error",
- "aal5 crc32 error",
- "oam transp or transpc crc10 error",
- "reserved 25",
- "reserved 26",
- "reserved 27",
- "reserved 28",
- "reserved 29",
- "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
- "reassembly abort: no buffers",
- "receive buffer overflow",
- "change in GFC",
- "receive buffer full",
- "low priority discard - no receive descriptor",
- "low priority discard - missing end of packet",
- "reserved 37",
- "reserved 38",
- "reserved 39",
- "reserved 40",
- "reserved 41",
- "reserved 42",
- "reserved 43",
- "reserved 44",
- "reserved 45",
- "reserved 46",
- "reserved 47",
- "reserved 48",
- "reserved 49",
- "reserved 50",
- "reserved 51",
- "reserved 52",
- "reserved 53",
- "reserved 54",
- "reserved 55",
- "reserved 56",
- "reserved 57",
- "reserved 58",
- "reserved 59",
- "reserved 60",
- "reserved 61",
- "reserved 62",
- "reserved 63",
-};
-
-static char *irq_bitname[] = {
- "LPCO",
- "DPCO",
- "RBRQ0_W",
- "RBRQ1_W",
- "RBRQ2_W",
- "RBRQ3_W",
- "RBRQ0_NF",
- "RBRQ1_NF",
- "RBRQ2_NF",
- "RBRQ3_NF",
- "BFP_SC",
- "INIT",
- "INIT_ERR",
- "USCEO",
- "UPEC0",
- "VPFCO",
- "CRCCO",
- "HECO",
- "TBRQ_W",
- "TBRQ_NF",
- "CTPQ_E",
- "GFC_C0",
- "PCI_FTL",
- "CSQ_W",
- "CSQ_NF",
- "EXT_INT",
- "RXDMA_S"
-};
-
-
-#define PHY_EOF -1
-#define PHY_CLEARALL -2
-
-struct reginit_item {
- int reg, val;
-};
-
-
-static struct reginit_item PHY_NTC_INIT[] = {
- { PHY_CLEARALL, 0x40 },
- { 0x12, 0x0001 },
- { 0x13, 0x7605 },
- { 0x1A, 0x0001 },
- { 0x1B, 0x0005 },
- { 0x38, 0x0003 },
- { 0x39, 0x0006 }, /* changed here to make loopback */
- { 0x01, 0x5262 },
- { 0x15, 0x0213 },
- { 0x00, 0x0003 },
- { PHY_EOF, 0}, /* -1 signals end of list */
-};
-
-
-/* Safety feature: If the card interrupts more than this number of times
- in a jiffy (1/100th of a second) then we just disable the interrupt and
- print a message. This prevents the system from hanging.
-
- 150000 packets per second is close to the limit a PC is going to have
- anyway. We therefore have to disable this for production. -- REW */
-#undef IRQ_RATE_LIMIT // 100
-
-/* Interrupts work now. Unlike serial cards, ATM cards don't work all
- that great without interrupts. -- REW */
-#undef FS_POLL_FREQ // 100
-
-/*
- This driver can spew a whole lot of debugging output at you. If you
- need maximum performance, you should disable the DEBUG define. To
- aid in debugging in the field, I'm leaving the compile-time debug
- features enabled, and disable them "runtime". That allows me to
-  features enabled, and disabling them at runtime. That allows me to
- them to recompile... -- REW
-*/
-#define DEBUG
-
-#ifdef DEBUG
-#define fs_dprintk(f, str...) if (fs_debug & f) printk (str)
-#else
-#define fs_dprintk(f, str...) /* nothing */
-#endif
-
-
-static int fs_keystream = 0;
-
-#ifdef DEBUG
-/* I didn't forget to set this to zero before shipping. Hit me with a stick
- if you get this with the debug default not set to zero again. -- REW */
-static int fs_debug = 0;
-#else
-#define fs_debug 0
-#endif
-
-#ifdef MODULE
-#ifdef DEBUG
-module_param(fs_debug, int, 0644);
-#endif
-module_param(loopback, int, 0);
-module_param(num, int, 0);
-module_param(fs_keystream, int, 0);
-/* XXX Add rx_buf_sizes and rx_pool_sizes, as per Amar's request. -- REW */
-#endif
-
-
-#define FS_DEBUG_FLOW 0x00000001
-#define FS_DEBUG_OPEN 0x00000002
-#define FS_DEBUG_QUEUE 0x00000004
-#define FS_DEBUG_IRQ 0x00000008
-#define FS_DEBUG_INIT 0x00000010
-#define FS_DEBUG_SEND 0x00000020
-#define FS_DEBUG_PHY 0x00000040
-#define FS_DEBUG_CLEANUP 0x00000080
-#define FS_DEBUG_QOS 0x00000100
-#define FS_DEBUG_TXQ 0x00000200
-#define FS_DEBUG_ALLOC 0x00000400
-#define FS_DEBUG_TXMEM 0x00000800
-#define FS_DEBUG_QSIZE 0x00001000
-
-
-#define func_enter() fs_dprintk(FS_DEBUG_FLOW, "fs: enter %s\n", __func__)
-#define func_exit() fs_dprintk(FS_DEBUG_FLOW, "fs: exit %s\n", __func__)
-
-
-static struct fs_dev *fs_boards = NULL;
-
-#ifdef DEBUG
-
-static void my_hd (void *addr, int len)
-{
- int j, ch;
- unsigned char *ptr = addr;
-
- while (len > 0) {
- printk ("%p ", ptr);
- for (j=0;j < ((len < 16)?len:16);j++) {
- printk ("%02x %s", ptr[j], (j==7)?" ":"");
- }
- for ( ;j < 16;j++) {
- printk (" %s", (j==7)?" ":"");
- }
- for (j=0;j < ((len < 16)?len:16);j++) {
- ch = ptr[j];
- printk ("%c", (ch < 0x20)?'.':((ch > 0x7f)?'.':ch));
- }
- printk ("\n");
- ptr += 16;
- len -= 16;
- }
-}
-#else /* DEBUG */
-static void my_hd (void *addr, int len){}
-#endif /* DEBUG */
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-/* Hmm. If this is ATM specific, why isn't there an ATM routine for this?
- * I copied it over from the ambassador driver. -- REW */
-
-static inline void fs_kfree_skb (struct sk_buff * skb)
-{
- if (ATM_SKB(skb)->vcc->pop)
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- else
- dev_kfree_skb_any (skb);
-}
-
-
-
-
-/* It seems the ATM forum recommends this horribly complicated 16bit
- * floating point format. Turns out the Ambassador uses the exact same
- * encoding. I just copied it over. If Mitch agrees, I'll move it over
- * to the atm_misc file or something like that. (and remove it from
- * here and the ambassador driver) -- REW
- */
-
-/* The good thing about this format is that it is monotonic. So,
- a conversion routine need not be very complicated. To be able to
-   round "nearest" we need to take along a few extra bits. Let's
- put these after 16 bits, so that we can just return the top 16
- bits of the 32bit number as the result:
-
- int mr (unsigned int rate, int r)
- {
- int e = 16+9;
- static int round[4]={0, 0, 0xffff, 0x8000};
- if (!rate) return 0;
- while (rate & 0xfc000000) {
- rate >>= 1;
- e++;
- }
- while (! (rate & 0xfe000000)) {
- rate <<= 1;
- e--;
- }
-
-// Now the mantissa is in bit positions 16-25; the top one (bit 25) is the "hidden 1", which the next line clears.
- rate &= ~0x02000000;
-// Next add in the exponent
- rate |= e << (16+9);
-// And perform the rounding:
- return (rate + round[r]) >> 16;
- }
-
-   14 lines of code. Compare that with the 120 that the Ambassador
-   guys needed. It would be 8 lines shorter if I really tried to reduce
-   the number of lines:
-
- int mr (unsigned int rate, int r)
- {
- int e = 16+9;
- static int round[4]={0, 0, 0xffff, 0x8000};
- if (!rate) return 0;
- for (; rate & 0xfc000000 ;rate >>= 1, e++);
- for (;!(rate & 0xfe000000);rate <<= 1, e--);
-    return (((rate & ~0x02000000) | (e << (16+9))) + round[r]) >> 16;
- }
-
- Exercise for the reader: Remove one more line-of-code, without
- cheating. (Just joining two lines is cheating). (I know it's
- possible, don't think you've beat me if you found it... If you
- manage to lose two lines or more, keep me updated! ;-)
-
- -- REW */
-
-
-#define ROUND_UP 1
-#define ROUND_DOWN 2
-#define ROUND_NEAREST 3
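/* Editorial worked example, not in the original source: encoding
 * 100 cells/sec in the 16-bit format described above.
 *
 *   100 = (1 + m/512) * 2^e   with e = 6, since 2^6 = 64 <= 100 < 128
 *   m/512 = 100/64 - 1 = 0.5625          ->  m = 288, exactly (no rounding)
 *   bits  = (1 << 14) | (6 << 9) | 288   =  0x4000 | 0x0c00 | 0x120 = 0x4d20
 *
 * make_rate() below deliberately leaves bit 14 clear because of the
 * Ambassador microcode bug mentioned in its comments, so it would return
 * 0x0d20 for this rate.
 */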
-/********** make rate (not quite as much fun as Horizon) **********/
-
-static int make_rate(unsigned int rate, int r,
- u16 *bits, unsigned int *actual)
-{
- unsigned char exp = -1; /* hush gcc */
- unsigned int man = -1; /* hush gcc */
-
- fs_dprintk (FS_DEBUG_QOS, "make_rate %u", rate);
-
- /* rates in cells per second, ITU format (nasty 16-bit floating-point)
- given 5-bit e and 9-bit m:
- rate = EITHER (1+m/2^9)*2^e OR 0
- bits = EITHER 1<<14 | e<<9 | m OR 0
- (bit 15 is "reserved", bit 14 "non-zero")
- smallest rate is 0 (special representation)
- largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
- smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
- simple algorithm:
- find position of top bit, this gives e
- remove top bit and shift (rounding if feeling clever) by 9-e
- */
- /* Ambassador ucode bug: please don't set bit 14! so 0 rate not
- representable. // This should move into the ambassador driver
- when properly merged. -- REW */
-
- if (rate > 0xffc00000U) {
- /* larger than largest representable rate */
-
- if (r == ROUND_UP) {
- return -EINVAL;
- } else {
- exp = 31;
- man = 511;
- }
-
- } else if (rate) {
- /* representable rate */
-
- exp = 31;
- man = rate;
-
- /* invariant: rate = man*2^(exp-31) */
- while (!(man & (1<<31))) {
- exp = exp - 1;
- man = man<<1;
- }
-
- /* man has top bit set
- rate = (2^31+(man-2^31))*2^(exp-31)
- rate = (1+(man-2^31)/2^31)*2^exp
- */
- man = man<<1;
- man &= 0xffffffffU; /* a nop on 32-bit systems */
- /* rate = (1+man/2^32)*2^exp
-
- exp is in the range 0 to 31, man is in the range 0 to 2^32-1
- time to lose significance... we want m in the range 0 to 2^9-1
- rounding presents a minor problem... we first decide which way
- we are rounding (based on given rounding direction and possibly
- the bits of the mantissa that are to be discarded).
- */
-
- switch (r) {
- case ROUND_DOWN: {
- /* just truncate */
- man = man>>(32-9);
- break;
- }
- case ROUND_UP: {
- /* check all bits that we are discarding */
- if (man & (~0U>>9)) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- /* no need to check for round up outside of range */
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- case ROUND_NEAREST: {
- /* check msb that we are discarding */
- if (man & (1<<(32-9-1))) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- /* no need to check for round up outside of range */
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- }
-
- } else {
- /* zero rate - not representable */
-
- if (r == ROUND_DOWN) {
- return -EINVAL;
- } else {
- exp = 0;
- man = 0;
- }
- }
-
- fs_dprintk (FS_DEBUG_QOS, "rate: man=%u, exp=%hu", man, exp);
-
- if (bits)
- *bits = /* (1<<14) | */ (exp<<9) | man;
-
- if (actual)
- *actual = (exp >= 9)
- ? (1 << exp) + (man << (exp-9))
- : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
-
- return 0;
-}
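/* Editorial usage sketch, not in the original source: this is roughly how
 * fs_open() further down turns a positive peak cell rate into the 16-bit
 * TMC0 value, rounding up and propagating -EINVAL for unrepresentable
 * rates.
 */
static int example_pcr_to_tmc0(unsigned int pcr, u16 *tmc0)
{
  unsigned int actual;
  int error;

  error = make_rate(pcr, ROUND_UP, tmc0, &actual);
  if (!error)
    fs_dprintk(FS_DEBUG_QOS, "wanted %u cells/sec, got %u\n", pcr, actual);
  return error;
}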
-
-
-
-
-/* FireStream access routines */
-/* For DEEP-DOWN debugging these can be rigged to intercept accesses to
- certain registers or to just log all accesses. */
-
-static inline void write_fs (struct fs_dev *dev, int offset, u32 val)
-{
- writel (val, dev->base + offset);
-}
-
-
-static inline u32 read_fs (struct fs_dev *dev, int offset)
-{
- return readl (dev->base + offset);
-}
-
-
-
-static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q)
-{
- return bus_to_virt (read_fs (dev, Q_WP(q->offset)) & Q_ADDR_MASK);
-}
-
-
-static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe)
-{
- u32 wp;
- struct FS_QENTRY *cqe;
-
- /* XXX Sanity check: the write pointer can be checked to be
- still the same as the value passed as qe... -- REW */
- /* udelay (5); */
- while ((wp = read_fs (dev, Q_WP (q->offset))) & Q_FULL) {
- fs_dprintk (FS_DEBUG_TXQ, "Found queue at %x full. Waiting.\n",
- q->offset);
- schedule ();
- }
-
- wp &= ~0xf;
- cqe = bus_to_virt (wp);
- if (qe != cqe) {
- fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe);
- }
-
- write_fs (dev, Q_WP(q->offset), Q_INCWRAP);
-
- {
- static int c;
- if (!(c++ % 100))
- {
- int rp, wp;
- rp = read_fs (dev, Q_RP(q->offset));
- wp = read_fs (dev, Q_WP(q->offset));
- fs_dprintk (FS_DEBUG_TXQ, "q at %d: %x-%x: %x entries.\n",
- q->offset, rp, wp, wp-rp);
- }
- }
-}
-
-#ifdef DEBUG_EXTRA
-static struct FS_QENTRY pq[60];
-static int qp;
-
-static struct FS_BPENTRY dq[60];
-static int qd;
-static void *da[60];
-#endif
-
-static void submit_queue (struct fs_dev *dev, struct queue *q,
- u32 cmd, u32 p1, u32 p2, u32 p3)
-{
- struct FS_QENTRY *qe;
-
- qe = get_qentry (dev, q);
- qe->cmd = cmd;
- qe->p0 = p1;
- qe->p1 = p2;
- qe->p2 = p3;
- submit_qentry (dev, q, qe);
-
-#ifdef DEBUG_EXTRA
- pq[qp].cmd = cmd;
- pq[qp].p0 = p1;
- pq[qp].p1 = p2;
- pq[qp].p2 = p3;
- qp++;
- if (qp >= 60) qp = 0;
-#endif
-}
-
-/* Test the "other" way one day... -- REW */
-#if 1
-#define submit_command submit_queue
-#else
-
-static void submit_command (struct fs_dev *dev, struct queue *q,
- u32 cmd, u32 p1, u32 p2, u32 p3)
-{
- write_fs (dev, CMDR0, cmd);
- write_fs (dev, CMDR1, p1);
- write_fs (dev, CMDR2, p2);
- write_fs (dev, CMDR3, p3);
-}
-#endif
-
-
-
-static void process_return_queue (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- struct FS_QENTRY *qe;
- void *tc;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping return queue entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. (%d)\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
- switch (STATUS_CODE (qe)) {
- case 5:
- tc = bus_to_virt (qe->p0);
- fs_dprintk (FS_DEBUG_ALLOC, "Free tc: %p\n", tc);
- kfree (tc);
- break;
- }
-
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- long tmp;
- struct FS_QENTRY *qe;
- struct sk_buff *skb;
- struct FS_BPENTRY *td;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping txdone entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x: %d\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
- if (STATUS_CODE (qe) != 2)
- fs_dprintk (FS_DEBUG_TXMEM, "queue entry: %08x %08x %08x %08x: %d\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
-
- switch (STATUS_CODE (qe)) {
- case 0x01: /* This is for AAL0 where we put the chip in streaming mode */
- fallthrough;
- case 0x02:
- /* Process a real txdone entry. */
- tmp = qe->p0;
- if (tmp & 0x0f)
- printk (KERN_WARNING "td not aligned: %ld\n", tmp);
- tmp &= ~0x0f;
- td = bus_to_virt (tmp);
-
- fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p.\n",
- td->flags, td->next, td->bsa, td->aal_bufsize, td->skb );
-
- skb = td->skb;
- if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
- FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
- wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
- }
- td->dev->ntxpckts--;
-
- {
- static int c=0;
-
- if (!(c++ % 100)) {
- fs_dprintk (FS_DEBUG_QSIZE, "[%d]", td->dev->ntxpckts);
- }
- }
-
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- fs_dprintk (FS_DEBUG_TXMEM, "i");
- fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
- fs_kfree_skb (skb);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td);
- memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY));
- kfree (td);
- break;
- default:
- /* Here we get the tx purge inhibit command ... */
- /* Action, I believe, is "don't do anything". -- REW */
- ;
- }
-
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-static void process_incoming (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- struct FS_QENTRY *qe;
- struct FS_BPENTRY *pe;
- struct sk_buff *skb;
- unsigned int channo;
- struct atm_vcc *atm_vcc;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping incoming queue entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. ",
- qe->cmd, qe->p0, qe->p1, qe->p2);
-
- fs_dprintk (FS_DEBUG_QUEUE, "-> %x: %s\n",
- STATUS_CODE (qe),
- res_strings[STATUS_CODE(qe)]);
-
- pe = bus_to_virt (qe->p0);
- fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p %p.\n",
- pe->flags, pe->next, pe->bsa, pe->aal_bufsize,
- pe->skb, pe->fp);
-
- channo = qe->cmd & 0xffff;
-
- if (channo < dev->nchannels)
- atm_vcc = dev->atm_vccs[channo];
- else
- atm_vcc = NULL;
-
- /* Single buffer packet */
- switch (STATUS_CODE (qe)) {
- case 0x1:
- /* Fall through for streaming mode */
- fallthrough;
- case 0x2:/* Packet received OK.... */
- if (atm_vcc) {
- skb = pe->skb;
- pe->fp->n--;
-#if 0
- fs_dprintk (FS_DEBUG_QUEUE, "Got skb: %p\n", skb);
- if (FS_DEBUG_QUEUE & fs_debug) my_hd (bus_to_virt (pe->bsa), 0x20);
-#endif
- skb_put (skb, qe->p1 & 0xffff);
- ATM_SKB(skb)->vcc = atm_vcc;
- atomic_inc(&atm_vcc->stats->rx);
- __net_timestamp(skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
- atm_vcc->push (atm_vcc, skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
- kfree (pe);
- } else {
- printk (KERN_ERR "Got a receive on a non-open channel %d.\n", channo);
- }
- break;
- case 0x17:/* AAL 5 CRC32 error. IFF the length field is nonzero, a buffer
- has been consumed and needs to be processed. -- REW */
- if (qe->p1 & 0xffff) {
- pe = bus_to_virt (qe->p0);
- pe->fp->n--;
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", pe->skb);
- dev_kfree_skb_any (pe->skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
- kfree (pe);
- }
- if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
- break;
- case 0x1f: /* Reassembly abort: no buffers. */
- /* Silently increment error counter. */
- if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
- break;
- default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
- printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
- STATUS_CODE(qe), res_strings[STATUS_CODE (qe)]);
- }
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-
-#define DO_DIRECTION(tp) ((tp)->traffic_class != ATM_NONE)
-
-static int fs_open(struct atm_vcc *atm_vcc)
-{
- struct fs_dev *dev;
- struct fs_vcc *vcc;
- struct fs_transmit_config *tc;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
- /* struct fs_receive_config *rc;*/
- /* struct FS_QENTRY *qe; */
- int error;
- int bfp;
- int to;
- unsigned short tmc0;
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
-
- func_enter ();
-
- dev = FS_DEV(atm_vcc->dev);
- fs_dprintk (FS_DEBUG_OPEN, "fs: open on dev: %p, vcc at %p\n",
- dev, atm_vcc);
-
- if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
- set_bit(ATM_VF_ADDR, &atm_vcc->flags);
-
- if ((atm_vcc->qos.aal != ATM_AAL5) &&
- (atm_vcc->qos.aal != ATM_AAL2))
- return -EINVAL; /* XXX AAL0 */
-
- fs_dprintk (FS_DEBUG_OPEN, "fs: (itf %d): open %d.%d\n",
- atm_vcc->dev->number, atm_vcc->vpi, atm_vcc->vci);
-
- /* XXX handle qos parameters (rate limiting) ? */
-
- vcc = kmalloc(sizeof(struct fs_vcc), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc VCC: %p(%zd)\n", vcc, sizeof(struct fs_vcc));
- if (!vcc) {
- clear_bit(ATM_VF_ADDR, &atm_vcc->flags);
- return -ENOMEM;
- }
-
- atm_vcc->dev_data = vcc;
- vcc->last_skb = NULL;
-
- init_waitqueue_head (&vcc->close_wait);
-
- txtp = &atm_vcc->qos.txtp;
- rxtp = &atm_vcc->qos.rxtp;
-
- if (!test_bit(ATM_VF_PARTIAL, &atm_vcc->flags)) {
- if (IS_FS50(dev)) {
-     /* Increment the channel number: take a free one next time. */
- for (to=33;to;to--, dev->channo++) {
- /* We only have 32 channels */
- if (dev->channo >= 32)
- dev->channo = 0;
-    /* If we need to do RX, AND the RX is in use, try the next */
- if (DO_DIRECTION(rxtp) && dev->atm_vccs[dev->channo])
- continue;
-    /* If we need to do TX, AND the TX is in use, try the next */
- if (DO_DIRECTION(txtp) && test_bit (dev->channo, dev->tx_inuse))
- continue;
- /* Ok, both are free! (or not needed) */
- break;
- }
- if (!to) {
- printk ("No more free channels for FS50..\n");
- kfree(vcc);
- return -EBUSY;
- }
- vcc->channo = dev->channo;
- dev->channo &= dev->channel_mask;
-
- } else {
- vcc->channo = (vpi << FS155_VCI_BITS) | (vci);
- if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
- ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
- printk ("Channel is in use for FS155.\n");
- kfree(vcc);
- return -EBUSY;
- }
- }
- fs_dprintk (FS_DEBUG_OPEN, "OK. Allocated channel %x(%d).\n",
- vcc->channo, vcc->channo);
- }
-
- if (DO_DIRECTION (txtp)) {
- tc = kmalloc (sizeof (struct fs_transmit_config), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc tc: %p(%zd)\n",
- tc, sizeof (struct fs_transmit_config));
- if (!tc) {
- fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
- kfree(vcc);
- return -ENOMEM;
- }
-
- /* Allocate the "open" entry from the high priority txq. This makes
- it most likely that the chip will notice it. It also prevents us
- from having to wait for completion. On the other hand, we may
- need to wait for completion anyway, to see if it completed
- successfully. */
-
- switch (atm_vcc->qos.aal) {
- case ATM_AAL2:
- case ATM_AAL0:
- tc->flags = 0
- | TC_FLAGS_TRANSPARENT_PAYLOAD
- | TC_FLAGS_PACKET
- | (1 << 28)
- | TC_FLAGS_TYPE_UBR /* XXX Change to VBR -- PVDL */
- | TC_FLAGS_CAL0;
- break;
- case ATM_AAL5:
- tc->flags = 0
- | TC_FLAGS_AAL5
- | TC_FLAGS_PACKET /* ??? */
- | TC_FLAGS_TYPE_CBR
- | TC_FLAGS_CAL0;
- break;
- default:
- printk ("Unknown aal: %d\n", atm_vcc->qos.aal);
- tc->flags = 0;
- }
- /* Docs are vague about this atm_hdr field. By the way, the FS
- * chip makes odd errors if lower bits are set.... -- REW */
- tc->atm_hdr = (vpi << 20) | (vci << 4);
- tmc0 = 0;
- {
- int pcr = atm_pcr_goal (txtp);
-
- fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr);
-
- /* XXX Hmm. officially we're only allowed to do this if rounding
- is round_down -- REW */
- if (IS_FS50(dev)) {
- if (pcr > 51840000/53/8) pcr = 51840000/53/8;
- } else {
- if (pcr > 155520000/53/8) pcr = 155520000/53/8;
- }
- if (!pcr) {
- /* no rate cap */
- tmc0 = IS_FS50(dev)?0x61BE:0x64c9; /* Just copied over the bits from Fujitsu -- REW */
- } else {
- int r;
- if (pcr < 0) {
- r = ROUND_DOWN;
- pcr = -pcr;
- } else {
- r = ROUND_UP;
- }
- error = make_rate (pcr, r, &tmc0, NULL);
- if (error) {
- kfree(tc);
- kfree(vcc);
- return error;
- }
- }
- fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr);
- }
-
- tc->TMC[0] = tmc0 | 0x4000;
- tc->TMC[1] = 0; /* Unused */
- tc->TMC[2] = 0; /* Unused */
- tc->TMC[3] = 0; /* Unused */
-
- tc->spec = 0; /* UTOPIA address, UDF, HEC: Unused -> 0 */
- tc->rtag[0] = 0; /* What should I do with routing tags???
- -- Not used -- AS -- Thanks -- REW*/
- tc->rtag[1] = 0;
- tc->rtag[2] = 0;
-
- if (fs_debug & FS_DEBUG_OPEN) {
- fs_dprintk (FS_DEBUG_OPEN, "TX config record:\n");
- my_hd (tc, sizeof (*tc));
- }
-
- /* We now use the "submit_command" function to submit commands to
- the firestream. There is a define up near the definition of
- that routine that switches this routine between immediate write
- to the immediate command registers and queuing the commands in
- the HPTXQ for execution. This last technique might be more
- efficient if we know we're going to submit a whole lot of
-     commands in one go, but this driver is not set up to be able to
-     use such a construct. So it probably doesn't matter much right
- now. -- REW */
-
- /* The command is IMMediate and INQueue. The parameters are out-of-line.. */
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_TX | QE_CMD_IMM_INQ | vcc->channo,
- virt_to_bus (tc), 0, 0);
-
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_EN | QE_CMD_IMM_INQ | vcc->channo,
- 0, 0, 0);
- set_bit (vcc->channo, dev->tx_inuse);
- }
-
- if (DO_DIRECTION (rxtp)) {
- dev->atm_vccs[vcc->channo] = atm_vcc;
-
- for (bfp = 0;bfp < FS_NR_FREE_POOLS; bfp++)
- if (atm_vcc->qos.rxtp.max_sdu <= dev->rx_fp[bfp].bufsize) break;
- if (bfp >= FS_NR_FREE_POOLS) {
- fs_dprintk (FS_DEBUG_OPEN, "No free pool fits sdu: %d.\n",
- atm_vcc->qos.rxtp.max_sdu);
- /* XXX Cleanup? -- Would just calling fs_close work??? -- REW */
-
- /* XXX clear tx inuse. Close TX part? */
- dev->atm_vccs[vcc->channo] = NULL;
- kfree (vcc);
- return -EINVAL;
- }
-
- switch (atm_vcc->qos.aal) {
- case ATM_AAL0:
- case ATM_AAL2:
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo,
- RC_FLAGS_TRANSP |
- RC_FLAGS_BFPS_BFP * bfp |
- RC_FLAGS_RXBM_PSB, 0, 0);
- break;
- case ATM_AAL5:
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo,
- RC_FLAGS_AAL5 |
- RC_FLAGS_BFPS_BFP * bfp |
- RC_FLAGS_RXBM_PSB, 0, 0);
- break;
- }
- if (IS_FS50 (dev)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_REG_WR | QE_CMD_IMM_INQ,
- 0x80 + vcc->channo,
- (vpi << 16) | vci, 0 ); /* XXX -- Use defines. */
- }
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_EN | QE_CMD_IMM_INQ | vcc->channo,
- 0, 0, 0);
- }
-
- /* Indicate we're done! */
- set_bit(ATM_VF_READY, &atm_vcc->flags);
-
- func_exit ();
- return 0;
-}
-
-
-static void fs_close(struct atm_vcc *atm_vcc)
-{
- struct fs_dev *dev = FS_DEV (atm_vcc->dev);
- struct fs_vcc *vcc = FS_VCC (atm_vcc);
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
-
- func_enter ();
-
- clear_bit(ATM_VF_READY, &atm_vcc->flags);
-
- fs_dprintk (FS_DEBUG_QSIZE, "--==**[%d]**==--", dev->ntxpckts);
- if (vcc->last_skb) {
- fs_dprintk (FS_DEBUG_QUEUE, "Waiting for skb %p to be sent.\n",
- vcc->last_skb);
- /* We're going to wait for the last packet to get sent on this VC. It would
-    be impolite not to send them, don't you think?
- XXX
- We don't know which packets didn't get sent. So if we get interrupted in
- this sleep_on, we'll lose any reference to these packets. Memory leak!
- On the other hand, it's awfully convenient that we can abort a "close" that
- is taking too long. Maybe just use non-interruptible sleep on? -- REW */
- wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
- }
-
- txtp = &atm_vcc->qos.txtp;
- rxtp = &atm_vcc->qos.rxtp;
-
-
- /* See App note XXX (Unpublished as of now) for the reason for the
- removal of the "CMD_IMM_INQ" part of the TX_PURGE_INH... -- REW */
-
- if (DO_DIRECTION (txtp)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_PURGE_INH | /*QE_CMD_IMM_INQ|*/ vcc->channo, 0,0,0);
- clear_bit (vcc->channo, dev->tx_inuse);
- }
-
- if (DO_DIRECTION (rxtp)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
- dev->atm_vccs [vcc->channo] = NULL;
-
- /* This means that this is configured as a receive channel */
- if (IS_FS50 (dev)) {
- /* Disable the receive filter. Is 0/0 indeed an invalid receive
- channel? -- REW. Yes it is. -- Hang. Ok. I'll use -1
- (0xfff...) -- REW */
- submit_command (dev, &dev->hp_txq,
- QE_CMD_REG_WR | QE_CMD_IMM_INQ,
- 0x80 + vcc->channo, -1, 0 );
- }
- }
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free vcc: %p\n", vcc);
- kfree (vcc);
-
- func_exit ();
-}
-
-
-static int fs_send (struct atm_vcc *atm_vcc, struct sk_buff *skb)
-{
- struct fs_dev *dev = FS_DEV (atm_vcc->dev);
- struct fs_vcc *vcc = FS_VCC (atm_vcc);
- struct FS_BPENTRY *td;
-
- func_enter ();
-
- fs_dprintk (FS_DEBUG_TXMEM, "I");
- fs_dprintk (FS_DEBUG_SEND, "Send: atm_vcc %p skb %p vcc %p dev %p\n",
- atm_vcc, skb, vcc, dev);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc t-skb: %p (atm_send)\n", skb);
-
- ATM_SKB(skb)->vcc = atm_vcc;
-
- vcc->last_skb = skb;
-
- td = kmalloc (sizeof (struct FS_BPENTRY), GFP_ATOMIC);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc transd: %p(%zd)\n", td, sizeof (struct FS_BPENTRY));
- if (!td) {
- /* Oops out of mem */
- return -ENOMEM;
- }
-
- fs_dprintk (FS_DEBUG_SEND, "first word in buffer: %x\n",
- *(int *) skb->data);
-
- td->flags = TD_EPI | TD_DATA | skb->len;
- td->next = 0;
- td->bsa = virt_to_bus (skb->data);
- td->skb = skb;
- td->dev = dev;
- dev->ntxpckts++;
-
-#ifdef DEBUG_EXTRA
- da[qd] = td;
- dq[qd].flags = td->flags;
- dq[qd].next = td->next;
- dq[qd].bsa = td->bsa;
- dq[qd].skb = td->skb;
- dq[qd].dev = td->dev;
- qd++;
- if (qd >= 60) qd = 0;
-#endif
-
- submit_queue (dev, &dev->hp_txq,
- QE_TRANSMIT_DE | vcc->channo,
- virt_to_bus (td), 0,
- virt_to_bus (td));
-
- fs_dprintk (FS_DEBUG_QUEUE, "in send: txq %d txrq %d\n",
- read_fs (dev, Q_EA (dev->hp_txq.offset)) -
- read_fs (dev, Q_SA (dev->hp_txq.offset)),
- read_fs (dev, Q_EA (dev->tx_relq.offset)) -
- read_fs (dev, Q_SA (dev->tx_relq.offset)));
-
- func_exit ();
- return 0;
-}
-
-
-/* Some function placeholders for functions we don't yet support. */
-
-#if 0
-static int fs_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- func_enter ();
- func_exit ();
- return -ENOIOCTLCMD;
-}
-
-
-static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,int optlen)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,unsigned int optlen)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static void fs_phy_put(struct atm_dev *dev,unsigned char value,
- unsigned long addr)
-{
- func_enter ();
- func_exit ();
-}
-
-
-static unsigned char fs_phy_get(struct atm_dev *dev,unsigned long addr)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static int fs_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
-{
- func_enter ();
- func_exit ();
- return 0;
-};
-
-#endif
-
-
-static const struct atmdev_ops ops = {
- .open = fs_open,
- .close = fs_close,
- .send = fs_send,
- .owner = THIS_MODULE,
- /* ioctl: fs_ioctl, */
- /* change_qos: fs_change_qos, */
-
- /* For now implement these internally here... */
- /* phy_put: fs_phy_put, */
- /* phy_get: fs_phy_get, */
-};
-
-
-static void undocumented_pci_fix(struct pci_dev *pdev)
-{
- u32 tint;
-
- /* The Windows driver says: */
- /* Switch off FireStream Retry Limit Threshold
- */
-
- /* The register at 0x28 is documented as "reserved", no further
- comments. */
-
- pci_read_config_dword (pdev, 0x28, &tint);
- if (tint != 0x80) {
- tint = 0x80;
- pci_write_config_dword (pdev, 0x28, tint);
- }
-}
-
-
-
-/**************************************************************************
- * PHY routines *
- **************************************************************************/
-
-static void write_phy(struct fs_dev *dev, int regnum, int val)
-{
- submit_command (dev, &dev->hp_txq, QE_CMD_PRP_WR | QE_CMD_IMM_INQ,
- regnum, val, 0);
-}
-
-static int init_phy(struct fs_dev *dev, struct reginit_item *reginit)
-{
- int i;
-
- func_enter ();
- while (reginit->reg != PHY_EOF) {
- if (reginit->reg == PHY_CLEARALL) {
-      /* PHY_CLEARALL means clear all registers; the number of registers is in "val". */
- for (i=0;i<reginit->val;i++) {
- write_phy (dev, i, 0);
- }
- } else {
- write_phy (dev, reginit->reg, reginit->val);
- }
- reginit++;
- }
- func_exit ();
- return 0;
-}
-
-static void reset_chip (struct fs_dev *dev)
-{
- int i;
-
- write_fs (dev, SARMODE0, SARMODE0_SRTS0);
-
- /* Undocumented delay */
- udelay (128);
-
-  /* The internal registers are documented to all reset to zero, but
-     comments & code in the Windows driver indicate that the pools are
-     NOT reset. */
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- write_fs (dev, FP_CNF (RXB_FP(i)), 0);
- write_fs (dev, FP_SA (RXB_FP(i)), 0);
- write_fs (dev, FP_EA (RXB_FP(i)), 0);
- write_fs (dev, FP_CNT (RXB_FP(i)), 0);
- write_fs (dev, FP_CTU (RXB_FP(i)), 0);
- }
-
- /* The same goes for the match channel registers, although those are
- NOT documented that way in the Windows driver. -- REW */
-  /* The Windows driver DOES write 0 to these registers somewhere in
-     the init sequence. However, a small hardware feature will
-     prevent reception of data on VPI/VCI = 0/0 (unless the channel
-     allocated happens to have no disabled channels with a lower
-     number). -- REW */
-
- /* Clear the match channel registers. */
- if (IS_FS50 (dev)) {
- for (i=0;i<FS50_NR_CHANNELS;i++) {
- write_fs (dev, 0x200 + i * 4, -1);
- }
- }
-}
-
-static void *aligned_kmalloc(int size, gfp_t flags, int alignment)
-{
- void *t;
-
- if (alignment <= 0x10) {
- t = kmalloc (size, flags);
- if ((unsigned long)t & (alignment-1)) {
- printk ("Kmalloc doesn't align things correctly! %p\n", t);
- kfree (t);
- return aligned_kmalloc (size, flags, alignment * 4);
- }
- return t;
- }
- printk (KERN_ERR "Request for > 0x10 alignment not yet implemented (hard!)\n");
- return NULL;
-}
-
-static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
- int nentries, int is_rq)
-{
- int sz = nentries * sizeof (struct FS_QENTRY);
- struct FS_QENTRY *p;
-
- func_enter ();
-
- fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n",
- queue, nentries);
-
- p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc queue: %p(%d)\n", p, sz);
-
- if (!p) return 0;
-
- write_fs (dev, Q_SA(queue), virt_to_bus(p));
- write_fs (dev, Q_EA(queue), virt_to_bus(p+nentries-1));
- write_fs (dev, Q_WP(queue), virt_to_bus(p));
- write_fs (dev, Q_RP(queue), virt_to_bus(p));
- if (is_rq) {
-    /* Configuration for the receive queue: 0 means interrupt immediately,
-       with no pre-warning for queues running empty; we do our best to keep
-       the queue filled anyway. */
- write_fs (dev, Q_CNF(queue), 0 );
- }
-
- txq->sa = p;
- txq->ea = p;
- txq->offset = queue;
-
- func_exit ();
- return 1;
-}
-
-
-static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
- int bufsize, int nr_buffers)
-{
- func_enter ();
-
- fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
-
- write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
- write_fs (dev, FP_SA(queue), 0);
- write_fs (dev, FP_EA(queue), 0);
- write_fs (dev, FP_CTU(queue), 0);
- write_fs (dev, FP_CNT(queue), 0);
-
- fp->offset = queue;
- fp->bufsize = bufsize;
- fp->nr_buffers = nr_buffers;
-
- func_exit ();
- return 1;
-}
-
-
-static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *fp)
-{
-#if 0
- /* This seems to be unreliable.... */
- return read_fs (dev, FP_CNT (fp->offset));
-#else
- return fp->n;
-#endif
-}
-
-
-/* Check if this gets going again if a pool ever runs out. -- Yes, it
- does. I've seen "receive abort: no buffers" and things started
- working again after that... -- REW */
-
-static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
- gfp_t gfp_flags)
-{
- struct FS_BPENTRY *qe, *ne;
- struct sk_buff *skb;
- int n = 0;
- u32 qe_tmp;
-
- fs_dprintk (FS_DEBUG_QUEUE, "Topping off queue at %x (%d-%d/%d)\n",
- fp->offset, read_fs (dev, FP_CNT (fp->offset)), fp->n,
- fp->nr_buffers);
- while (nr_buffers_in_freepool(dev, fp) < fp->nr_buffers) {
-
- skb = alloc_skb (fp->bufsize, gfp_flags);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-skb: %p(%d)\n", skb, fp->bufsize);
- if (!skb) break;
- ne = kmalloc (sizeof (struct FS_BPENTRY), gfp_flags);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-d: %p(%zd)\n", ne, sizeof (struct FS_BPENTRY));
- if (!ne) {
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", skb);
- dev_kfree_skb_any (skb);
- break;
- }
-
- fs_dprintk (FS_DEBUG_QUEUE, "Adding skb %p desc %p -> %p(%p) ",
- skb, ne, skb->data, skb->head);
- n++;
- ne->flags = FP_FLAGS_EPI | fp->bufsize;
- ne->next = virt_to_bus (NULL);
- ne->bsa = virt_to_bus (skb->data);
- ne->aal_bufsize = fp->bufsize;
- ne->skb = skb;
- ne->fp = fp;
-
- /*
- * FIXME: following code encodes and decodes
- * machine pointers (could be 64-bit) into a
- * 32-bit register.
- */
-
- qe_tmp = read_fs (dev, FP_EA(fp->offset));
- fs_dprintk (FS_DEBUG_QUEUE, "link at %x\n", qe_tmp);
- if (qe_tmp) {
- qe = bus_to_virt ((long) qe_tmp);
- qe->next = virt_to_bus(ne);
- qe->flags &= ~FP_FLAGS_EPI;
- } else
- write_fs (dev, FP_SA(fp->offset), virt_to_bus(ne));
-
- write_fs (dev, FP_EA(fp->offset), virt_to_bus (ne));
- fp->n++; /* XXX Atomic_inc? */
- write_fs (dev, FP_CTU(fp->offset), 1);
- }
-
- fs_dprintk (FS_DEBUG_QUEUE, "Added %d entries. \n", n);
-}
-
-static void free_queue(struct fs_dev *dev, struct queue *txq)
-{
- func_enter ();
-
- write_fs (dev, Q_SA(txq->offset), 0);
- write_fs (dev, Q_EA(txq->offset), 0);
- write_fs (dev, Q_RP(txq->offset), 0);
- write_fs (dev, Q_WP(txq->offset), 0);
- /* Configuration ? */
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free queue: %p\n", txq->sa);
- kfree (txq->sa);
-
- func_exit ();
-}
-
-static void free_freepool(struct fs_dev *dev, struct freepool *fp)
-{
- func_enter ();
-
- write_fs (dev, FP_CNF(fp->offset), 0);
- write_fs (dev, FP_SA (fp->offset), 0);
- write_fs (dev, FP_EA (fp->offset), 0);
- write_fs (dev, FP_CNT(fp->offset), 0);
- write_fs (dev, FP_CTU(fp->offset), 0);
-
- func_exit ();
-}
-
-
-
-static irqreturn_t fs_irq (int irq, void *dev_id)
-{
- int i;
- u32 status;
- struct fs_dev *dev = dev_id;
-
- status = read_fs (dev, ISR);
- if (!status)
- return IRQ_NONE;
-
- func_enter ();
-
-#ifdef IRQ_RATE_LIMIT
-  /* Aaargh! I'm ashamed. This costs more lines of code than the actual
-     interrupt routine! (Well, it used to when I wrote that comment.) -- REW */
- {
- static int lastjif;
- static int nintr=0;
-
- if (lastjif == jiffies) {
- if (++nintr > IRQ_RATE_LIMIT) {
- free_irq (dev->irq, dev_id);
- printk (KERN_ERR "fs: Too many interrupts. Turning off interrupt %d.\n",
- dev->irq);
- }
- } else {
- lastjif = jiffies;
- nintr = 0;
- }
- }
-#endif
- fs_dprintk (FS_DEBUG_QUEUE, "in intr: txq %d txrq %d\n",
- read_fs (dev, Q_EA (dev->hp_txq.offset)) -
- read_fs (dev, Q_SA (dev->hp_txq.offset)),
- read_fs (dev, Q_EA (dev->tx_relq.offset)) -
- read_fs (dev, Q_SA (dev->tx_relq.offset)));
-
- /* print the bits in the ISR register. */
- if (fs_debug & FS_DEBUG_IRQ) {
- /* The FS_DEBUG things are unnecessary here. But this way it is
- clear for grep that these are debug prints. */
- fs_dprintk (FS_DEBUG_IRQ, "IRQ status:");
- for (i=0;i<27;i++)
- if (status & (1 << i))
- fs_dprintk (FS_DEBUG_IRQ, " %s", irq_bitname[i]);
- fs_dprintk (FS_DEBUG_IRQ, "\n");
- }
-
- if (status & ISR_RBRQ0_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (0)!!!!\n");
- process_incoming (dev, &dev->rx_rq[0]);
- /* items mentioned on RBRQ0 are from FP 0 or 1. */
- top_off_fp (dev, &dev->rx_fp[0], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[1], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ1_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (1)!!!!\n");
- process_incoming (dev, &dev->rx_rq[1]);
- top_off_fp (dev, &dev->rx_fp[2], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[3], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ2_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (2)!!!!\n");
- process_incoming (dev, &dev->rx_rq[2]);
- top_off_fp (dev, &dev->rx_fp[4], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[5], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ3_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (3)!!!!\n");
- process_incoming (dev, &dev->rx_rq[3]);
- top_off_fp (dev, &dev->rx_fp[6], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[7], GFP_ATOMIC);
- }
-
- if (status & ISR_CSQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Command executed ok!\n");
- process_return_queue (dev, &dev->st_q);
- }
-
- if (status & ISR_TBRQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
- process_txdone_queue (dev, &dev->tx_relq);
- }
-
- func_exit ();
- return IRQ_HANDLED;
-}
-
-
-#ifdef FS_POLL_FREQ
-static void fs_poll (struct timer_list *t)
-{
- struct fs_dev *dev = from_timer(dev, t, timer);
-
- fs_irq (0, dev);
- dev->timer.expires = jiffies + FS_POLL_FREQ;
- add_timer (&dev->timer);
-}
-#endif
-
-static int fs_init(struct fs_dev *dev)
-{
- struct pci_dev *pci_dev;
- int isr, to;
- int i;
-
- func_enter ();
- pci_dev = dev->pci_dev;
-
- printk (KERN_INFO "found a FireStream %d card, base %16llx, irq%d.\n",
- IS_FS50(dev)?50:155,
- (unsigned long long)pci_resource_start(pci_dev, 0),
- dev->pci_dev->irq);
-
- if (fs_debug & FS_DEBUG_INIT)
- my_hd ((unsigned char *) dev, sizeof (*dev));
-
- undocumented_pci_fix (pci_dev);
-
- dev->hw_base = pci_resource_start(pci_dev, 0);
-
- dev->base = ioremap(dev->hw_base, 0x1000);
- if (!dev->base)
- return 1;
-
- reset_chip (dev);
-
- write_fs (dev, SARMODE0, 0
- | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. */
- | (1 * SARMODE0_INTMODE_READCLEAR)
- | (1 * SARMODE0_CWRE)
- | (IS_FS50(dev) ? SARMODE0_PRPWT_FS50_5:
- SARMODE0_PRPWT_FS155_3)
- | (1 * SARMODE0_CALSUP_1)
- | (IS_FS50(dev) ? (0
- | SARMODE0_RXVCS_32
- | SARMODE0_ABRVCS_32
- | SARMODE0_TXVCS_32):
- (0
- | SARMODE0_RXVCS_1k
- | SARMODE0_ABRVCS_1k
- | SARMODE0_TXVCS_1k)));
-
- /* 10ms * 100 is 1 second. That should be enough, as AN3:9 says it takes
- 1ms. */
- to = 100;
- while (--to) {
- isr = read_fs (dev, ISR);
-
- /* This bit is documented as "RESERVED" */
- if (isr & ISR_INIT_ERR) {
- printk (KERN_ERR "Error initializing the FS... \n");
- goto unmap;
- }
- if (isr & ISR_INIT) {
- fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n");
- break;
- }
-
- /* Try again after 10ms. */
- msleep(10);
- }
-
- if (!to) {
- printk (KERN_ERR "timeout initializing the FS... \n");
- goto unmap;
- }
-
- /* XXX fix for fs155 */
- dev->channel_mask = 0x1f;
- dev->channo = 0;
-
- /* AN3: 10 */
- write_fs (dev, SARMODE1, 0
- | (fs_keystream * SARMODE1_DEFHEC) /* XXX PHY */
- | ((loopback == 1) * SARMODE1_TSTLP) /* XXX Loopback mode enable... */
- | (1 * SARMODE1_DCRM)
- | (1 * SARMODE1_DCOAM)
- | (0 * SARMODE1_OAMCRC)
- | (0 * SARMODE1_DUMPE)
- | (0 * SARMODE1_GPLEN)
- | (0 * SARMODE1_GNAM)
- | (0 * SARMODE1_GVAS)
- | (0 * SARMODE1_GPAS)
- | (1 * SARMODE1_GPRI)
- | (0 * SARMODE1_PMS)
- | (0 * SARMODE1_GFCR)
- | (1 * SARMODE1_HECM2)
- | (1 * SARMODE1_HECM1)
- | (1 * SARMODE1_HECM0)
-    | (1 << 12) /* That's what Hang's driver does. Program to 0 */
- | (0 * 0xff) /* XXX FS155 */);
-
-
- /* Cal prescale etc */
-
- /* AN3: 11 */
- write_fs (dev, TMCONF, 0x0000000f);
- write_fs (dev, CALPRESCALE, 0x01010101 * num);
- write_fs (dev, 0x80, 0x000F00E4);
-
- /* AN3: 12 */
- write_fs (dev, CELLOSCONF, 0
- | ( 0 * CELLOSCONF_CEN)
- | ( CELLOSCONF_SC1)
- | (0x80 * CELLOSCONF_COBS)
- | (num * CELLOSCONF_COPK) /* Changed from 0xff to 0x5a */
- | (num * CELLOSCONF_COST));/* after a hint from Hang.
- * performance jumped 50->70... */
-
- /* Magic value by Hang */
- write_fs (dev, CELLOSCONF_COST, 0x0B809191);
-
- if (IS_FS50 (dev)) {
- write_fs (dev, RAS0, RAS0_DCD_XHLT);
- dev->atm_dev->ci_range.vpi_bits = 12;
- dev->atm_dev->ci_range.vci_bits = 16;
- dev->nchannels = FS50_NR_CHANNELS;
- } else {
- write_fs (dev, RAS0, RAS0_DCD_XHLT
- | (((1 << FS155_VPI_BITS) - 1) * RAS0_VPSEL)
- | (((1 << FS155_VCI_BITS) - 1) * RAS0_VCSEL));
-  /* We can choose the split arbitrarily. We might be able to
- support more. Whatever. This should do for now. */
- dev->atm_dev->ci_range.vpi_bits = FS155_VPI_BITS;
- dev->atm_dev->ci_range.vci_bits = FS155_VCI_BITS;
-
- /* Address bits we can't use should be compared to 0. */
- write_fs (dev, RAC, 0);
-
- /* Manual (AN9, page 6) says ASF1=0 means compare Utopia address
- * too. I can't find ASF1 anywhere. Anyway, we AND with just the
- * other bits, then compare with 0, which is exactly what we
- * want. */
- write_fs (dev, RAM, (1 << (28 - FS155_VPI_BITS - FS155_VCI_BITS)) - 1);
- dev->nchannels = FS155_NR_CHANNELS;
- }
- dev->atm_vccs = kcalloc (dev->nchannels, sizeof (struct atm_vcc *),
- GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%zd)\n",
- dev->atm_vccs, dev->nchannels * sizeof (struct atm_vcc *));
-
- if (!dev->atm_vccs) {
- printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n");
- /* XXX Clean up..... */
- goto unmap;
- }
-
- dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc tx_inuse: %p(%d)\n",
- dev->atm_vccs, dev->nchannels / 8);
-
- if (!dev->tx_inuse) {
- printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n");
- /* XXX Clean up..... */
- goto unmap;
- }
- /* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
- /* -- RAS2 : FS50 only: Default is OK. */
-
- /* DMAMODE, default should be OK. -- REW */
- write_fs (dev, DMAMR, DMAMR_TX_MODE_FULL);
-
- init_q (dev, &dev->hp_txq, TX_PQ(TXQ_HP), TXQ_NENTRIES, 0);
- init_q (dev, &dev->lp_txq, TX_PQ(TXQ_LP), TXQ_NENTRIES, 0);
- init_q (dev, &dev->tx_relq, TXB_RQ, TXQ_NENTRIES, 1);
- init_q (dev, &dev->st_q, ST_Q, TXQ_NENTRIES, 1);
-
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- init_fp (dev, &dev->rx_fp[i], RXB_FP(i),
- rx_buf_sizes[i], rx_pool_sizes[i]);
- top_off_fp (dev, &dev->rx_fp[i], GFP_KERNEL);
- }
-
-
- for (i=0;i < FS_NR_RX_QUEUES;i++)
- init_q (dev, &dev->rx_rq[i], RXB_RQ(i), RXRQ_NENTRIES, 1);
-
- dev->irq = pci_dev->irq;
- if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) {
- printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq);
- /* XXX undo all previous stuff... */
- goto unmap;
- }
- fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev);
-
-  /* We want to be notified of most things. Only the statistics counter
-     overflows are not interesting. */
- write_fs (dev, IMR, 0
- | ISR_RBRQ0_W
- | ISR_RBRQ1_W
- | ISR_RBRQ2_W
- | ISR_RBRQ3_W
- | ISR_TBRQ_W
- | ISR_CSQ_W);
-
- write_fs (dev, SARMODE0, 0
- | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. */
- | (1 * SARMODE0_GINT)
- | (1 * SARMODE0_INTMODE_READCLEAR)
- | (0 * SARMODE0_CWRE)
- | (IS_FS50(dev)?SARMODE0_PRPWT_FS50_5:
- SARMODE0_PRPWT_FS155_3)
- | (1 * SARMODE0_CALSUP_1)
- | (IS_FS50 (dev)?(0
- | SARMODE0_RXVCS_32
- | SARMODE0_ABRVCS_32
- | SARMODE0_TXVCS_32):
- (0
- | SARMODE0_RXVCS_1k
- | SARMODE0_ABRVCS_1k
- | SARMODE0_TXVCS_1k))
- | (1 * SARMODE0_RUN));
-
- init_phy (dev, PHY_NTC_INIT);
-
- if (loopback == 2) {
- write_phy (dev, 0x39, 0x000e);
- }
-
-#ifdef FS_POLL_FREQ
- timer_setup(&dev->timer, fs_poll, 0);
- dev->timer.expires = jiffies + FS_POLL_FREQ;
- add_timer (&dev->timer);
-#endif
-
- dev->atm_dev->dev_data = dev;
-
- func_exit ();
- return 0;
-unmap:
- iounmap(dev->base);
- return 1;
-}
-
-static int firestream_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
-{
- struct atm_dev *atm_dev;
- struct fs_dev *fs_dev;
-
- if (pci_enable_device(pci_dev))
- goto err_out;
-
- fs_dev = kzalloc (sizeof (struct fs_dev), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%zd)\n",
- fs_dev, sizeof (struct fs_dev));
- if (!fs_dev)
- goto err_out;
- atm_dev = atm_dev_register("fs", &pci_dev->dev, &ops, -1, NULL);
- if (!atm_dev)
- goto err_out_free_fs_dev;
-
- fs_dev->pci_dev = pci_dev;
- fs_dev->atm_dev = atm_dev;
- fs_dev->flags = ent->driver_data;
-
- if (fs_init(fs_dev))
- goto err_out_free_atm_dev;
-
- fs_dev->next = fs_boards;
- fs_boards = fs_dev;
- return 0;
-
- err_out_free_atm_dev:
- atm_dev_deregister(atm_dev);
- err_out_free_fs_dev:
- kfree(fs_dev);
- err_out:
- return -ENODEV;
-}
-
-static void firestream_remove_one(struct pci_dev *pdev)
-{
- int i;
- struct fs_dev *dev, *nxtdev;
- struct fs_vcc *vcc;
- struct FS_BPENTRY *fp, *nxt;
-
- func_enter ();
-
-#if 0
- printk ("hptxq:\n");
- for (i=0;i<60;i++) {
- printk ("%d: %08x %08x %08x %08x \n",
- i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2);
- qp++;
- if (qp >= 60) qp = 0;
- }
-
- printk ("descriptors:\n");
- for (i=0;i<60;i++) {
- printk ("%d: %p: %08x %08x %p %p\n",
- i, da[qd], dq[qd].flags, dq[qd].bsa, dq[qd].skb, dq[qd].dev);
- qd++;
- if (qd >= 60) qd = 0;
- }
-#endif
-
- for (dev = fs_boards;dev != NULL;dev=nxtdev) {
- fs_dprintk (FS_DEBUG_CLEANUP, "Releasing resources for dev at %p.\n", dev);
-
- /* XXX Hit all the tx channels too! */
-
- for (i=0;i < dev->nchannels;i++) {
- if (dev->atm_vccs[i]) {
- vcc = FS_VCC (dev->atm_vccs[i]);
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
-
- }
- }
-
- /* XXX Wait a while for the chip to release all buffers. */
-
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- for (fp=bus_to_virt (read_fs (dev, FP_SA(dev->rx_fp[i].offset)));
- !(fp->flags & FP_FLAGS_EPI);fp = nxt) {
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
- dev_kfree_skb_any (fp->skb);
- nxt = bus_to_virt (fp->next);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp);
- kfree (fp);
- }
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
- dev_kfree_skb_any (fp->skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp);
- kfree (fp);
- }
-
- /* Hang the chip in "reset", prevent it clobbering memory that is
- no longer ours. */
- reset_chip (dev);
-
- fs_dprintk (FS_DEBUG_CLEANUP, "Freeing irq%d.\n", dev->irq);
- free_irq (dev->irq, dev);
- del_timer_sync (&dev->timer);
-
- atm_dev_deregister(dev->atm_dev);
- free_queue (dev, &dev->hp_txq);
- free_queue (dev, &dev->lp_txq);
- free_queue (dev, &dev->tx_relq);
- free_queue (dev, &dev->st_q);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free atmvccs: %p\n", dev->atm_vccs);
- kfree (dev->atm_vccs);
-
- for (i=0;i< FS_NR_FREE_POOLS;i++)
- free_freepool (dev, &dev->rx_fp[i]);
-
- for (i=0;i < FS_NR_RX_QUEUES;i++)
- free_queue (dev, &dev->rx_rq[i]);
-
- iounmap(dev->base);
- fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev);
- nxtdev = dev->next;
- kfree (dev);
- }
-
- func_exit ();
-}
-
-static const struct pci_device_id firestream_pci_tbl[] = {
- { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50},
- { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155},
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, firestream_pci_tbl);
-
-static struct pci_driver firestream_driver = {
- .name = "firestream",
- .id_table = firestream_pci_tbl,
- .probe = firestream_init_one,
- .remove = firestream_remove_one,
-};
-
-static int __init firestream_init_module (void)
-{
- int error;
-
- func_enter ();
- error = pci_register_driver(&firestream_driver);
- func_exit ();
- return error;
-}
-
-static void __exit firestream_cleanup_module(void)
-{
- pci_unregister_driver(&firestream_driver);
-}
-
-module_init(firestream_init_module);
-module_exit(firestream_cleanup_module);
-
-MODULE_LICENSE("GPL");
-
-
-
diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h
deleted file mode 100644
index 6d684160808d..000000000000
--- a/drivers/atm/firestream.h
+++ /dev/null
@@ -1,502 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* drivers/atm/firestream.h - FireStream 155 (MB86697) and
- * FireStream 50 (MB86695) device driver
- */
-
-/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl
- * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA
- * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd
- */
-
-/*
-*/
-
-
-/***********************************************************************
- * first the defines for the chip. *
- ***********************************************************************/
-
-
-/********************* General chip parameters. ************************/
-
-#define FS_NR_FREE_POOLS 8
-#define FS_NR_RX_QUEUES 4
-
-
-/********************* queues and queue access macros ******************/
-
-
-/* A queue entry. */
-struct FS_QENTRY {
- u32 cmd;
- u32 p0, p1, p2;
-};
-
-
-/* A freepool entry. */
-struct FS_BPENTRY {
- u32 flags;
- u32 next;
- u32 bsa;
- u32 aal_bufsize;
-
- /* The hardware doesn't look at this, but we need the SKB somewhere... */
- struct sk_buff *skb;
- struct freepool *fp;
- struct fs_dev *dev;
-};
-
-
-#define STATUS_CODE(qe) ((qe->cmd >> 22) & 0x3f)
-
-
-/* OFFSETS against the base of a QUEUE... */
-#define QSA 0x00
-#define QEA 0x04
-#define QRP 0x08
-#define QWP 0x0c
-#define QCNF 0x10 /* Only for Release queues! */
-/* Not for the transmit pending queue. */
-
-
-/* OFFSETS against the base of a FREE POOL... */
-#define FPCNF 0x00
-#define FPSA 0x04
-#define FPEA 0x08
-#define FPCNT 0x0c
-#define FPCTU 0x10
-
-#define Q_SA(b) (b + QSA )
-#define Q_EA(b) (b + QEA )
-#define Q_RP(b) (b + QRP )
-#define Q_WP(b) (b + QWP )
-#define Q_CNF(b) (b + QCNF)
-
-#define FP_CNF(b) (b + FPCNF)
-#define FP_SA(b) (b + FPSA)
-#define FP_EA(b) (b + FPEA)
-#define FP_CNT(b) (b + FPCNT)
-#define FP_CTU(b) (b + FPCTU)
-
-/* bits in a queue register. */
-#define Q_FULL 0x1
-#define Q_EMPTY 0x2
-#define Q_INCWRAP 0x4
-#define Q_ADDR_MASK 0xfffffff0
-
-/* bits in a FreePool config register */
-#define RBFP_RBS (0x1 << 16)
-#define RBFP_RBSVAL (0x1 << 15)
-#define RBFP_CME (0x1 << 12)
-#define RBFP_DLP (0x1 << 11)
-#define RBFP_BFPWT (0x1 << 0)
-
-
-
-
-/* FireStream commands. */
-#define QE_CMD_NULL (0x00 << 22)
-#define QE_CMD_REG_RD (0x01 << 22)
-#define QE_CMD_REG_RDM (0x02 << 22)
-#define QE_CMD_REG_WR (0x03 << 22)
-#define QE_CMD_REG_WRM (0x04 << 22)
-#define QE_CMD_CONFIG_TX (0x05 << 22)
-#define QE_CMD_CONFIG_RX (0x06 << 22)
-#define QE_CMD_PRP_RD (0x07 << 22)
-#define QE_CMD_PRP_RDM (0x2a << 22)
-#define QE_CMD_PRP_WR (0x09 << 22)
-#define QE_CMD_PRP_WRM (0x2b << 22)
-#define QE_CMD_RX_EN (0x0a << 22)
-#define QE_CMD_RX_PURGE (0x0b << 22)
-#define QE_CMD_RX_PURGE_INH (0x0c << 22)
-#define QE_CMD_TX_EN (0x0d << 22)
-#define QE_CMD_TX_PURGE (0x0e << 22)
-#define QE_CMD_TX_PURGE_INH (0x0f << 22)
-#define QE_CMD_RST_CG (0x10 << 22)
-#define QE_CMD_SET_CG (0x11 << 22)
-#define QE_CMD_RST_CLP (0x12 << 22)
-#define QE_CMD_SET_CLP (0x13 << 22)
-#define QE_CMD_OVERRIDE (0x14 << 22)
-#define QE_CMD_ADD_BFP (0x15 << 22)
-#define QE_CMD_DUMP_TX (0x16 << 22)
-#define QE_CMD_DUMP_RX (0x17 << 22)
-#define QE_CMD_LRAM_RD (0x18 << 22)
-#define QE_CMD_LRAM_RDM (0x28 << 22)
-#define QE_CMD_LRAM_WR (0x19 << 22)
-#define QE_CMD_LRAM_WRM (0x29 << 22)
-#define QE_CMD_LRAM_BSET (0x1a << 22)
-#define QE_CMD_LRAM_BCLR (0x1b << 22)
-#define QE_CMD_CONFIG_SEGM (0x1c << 22)
-#define QE_CMD_READ_SEGM (0x1d << 22)
-#define QE_CMD_CONFIG_ROUT (0x1e << 22)
-#define QE_CMD_READ_ROUT (0x1f << 22)
-#define QE_CMD_CONFIG_TM (0x20 << 22)
-#define QE_CMD_READ_TM (0x21 << 22)
-#define QE_CMD_CONFIG_TXBM (0x22 << 22)
-#define QE_CMD_READ_TXBM (0x23 << 22)
-#define QE_CMD_CONFIG_RXBM (0x24 << 22)
-#define QE_CMD_READ_RXBM (0x25 << 22)
-#define QE_CMD_CONFIG_REAS (0x26 << 22)
-#define QE_CMD_READ_REAS (0x27 << 22)
-
-#define QE_TRANSMIT_DE (0x0 << 30)
-#define QE_CMD_LINKED (0x1 << 30)
-#define QE_CMD_IMM (0x2 << 30)
-#define QE_CMD_IMM_INQ (0x3 << 30)
-
-#define TD_EPI (0x1 << 27)
-#define TD_COMMAND (0x1 << 28)
-
-#define TD_DATA (0x0 << 29)
-#define TD_RM_CELL (0x1 << 29)
-#define TD_OAM_CELL (0x2 << 29)
-#define TD_OAM_CELL_SEGMENT (0x3 << 29)
-
-#define TD_BPI (0x1 << 20)
-
-#define FP_FLAGS_EPI (0x1 << 27)
-
-
-#define TX_PQ(i) (0x00 + (i) * 0x10)
-#define TXB_RQ (0x20)
-#define ST_Q (0x48)
-#define RXB_FP(i) (0x90 + (i) * 0x14)
-#define RXB_RQ(i) (0x134 + (i) * 0x14)
-
-
-#define TXQ_HP 0
-#define TXQ_LP 1
-
-/* Phew. You don't want to know how many revisions these simple queue
- * address macros went through before I got them nice and compact as
- * they are now. -- REW
- */
-
-
-/* And now for something completely different:
- * The rest of the registers... */
-
-
-#define CMDR0 0x34
-#define CMDR1 0x38
-#define CMDR2 0x3c
-#define CMDR3 0x40
-
-
-#define SARMODE0 0x5c
-
-#define SARMODE0_TXVCS_0 (0x0 << 0)
-#define SARMODE0_TXVCS_1k (0x1 << 0)
-#define SARMODE0_TXVCS_2k (0x2 << 0)
-#define SARMODE0_TXVCS_4k (0x3 << 0)
-#define SARMODE0_TXVCS_8k (0x4 << 0)
-#define SARMODE0_TXVCS_16k (0x5 << 0)
-#define SARMODE0_TXVCS_32k (0x6 << 0)
-#define SARMODE0_TXVCS_64k (0x7 << 0)
-#define SARMODE0_TXVCS_32 (0x8 << 0)
-
-#define SARMODE0_ABRVCS_0 (0x0 << 4)
-#define SARMODE0_ABRVCS_512 (0x1 << 4)
-#define SARMODE0_ABRVCS_1k (0x2 << 4)
-#define SARMODE0_ABRVCS_2k (0x3 << 4)
-#define SARMODE0_ABRVCS_4k (0x4 << 4)
-#define SARMODE0_ABRVCS_8k (0x5 << 4)
-#define SARMODE0_ABRVCS_16k (0x6 << 4)
-#define SARMODE0_ABRVCS_32k (0x7 << 4)
-#define SARMODE0_ABRVCS_32 (0x9 << 4) /* The others are "8", this one really has to
- be 9. Tell me you don't believe me. -- REW */
-
-#define SARMODE0_RXVCS_0 (0x0 << 8)
-#define SARMODE0_RXVCS_1k (0x1 << 8)
-#define SARMODE0_RXVCS_2k (0x2 << 8)
-#define SARMODE0_RXVCS_4k (0x3 << 8)
-#define SARMODE0_RXVCS_8k (0x4 << 8)
-#define SARMODE0_RXVCS_16k (0x5 << 8)
-#define SARMODE0_RXVCS_32k (0x6 << 8)
-#define SARMODE0_RXVCS_64k (0x7 << 8)
-#define SARMODE0_RXVCS_32 (0x8 << 8)
-
-#define SARMODE0_CALSUP_1 (0x0 << 12)
-#define SARMODE0_CALSUP_2 (0x1 << 12)
-#define SARMODE0_CALSUP_3 (0x2 << 12)
-#define SARMODE0_CALSUP_4 (0x3 << 12)
-
-#define SARMODE0_PRPWT_FS50_0 (0x0 << 14)
-#define SARMODE0_PRPWT_FS50_2 (0x1 << 14)
-#define SARMODE0_PRPWT_FS50_5 (0x2 << 14)
-#define SARMODE0_PRPWT_FS50_11 (0x3 << 14)
-
-#define SARMODE0_PRPWT_FS155_0 (0x0 << 14)
-#define SARMODE0_PRPWT_FS155_1 (0x1 << 14)
-#define SARMODE0_PRPWT_FS155_2 (0x2 << 14)
-#define SARMODE0_PRPWT_FS155_3 (0x3 << 14)
-
-#define SARMODE0_SRTS0 (0x1 << 23)
-#define SARMODE0_SRTS1 (0x1 << 24)
-
-#define SARMODE0_RUN (0x1 << 25)
-
-#define SARMODE0_UNLOCK (0x1 << 26)
-#define SARMODE0_CWRE (0x1 << 27)
-
-
-#define SARMODE0_INTMODE_READCLEAR (0x0 << 28)
-#define SARMODE0_INTMODE_READNOCLEAR (0x1 << 28)
-#define SARMODE0_INTMODE_READNOCLEARINHIBIT (0x2 << 28)
-#define SARMODE0_INTMODE_READCLEARINHIBIT (0x3 << 28) /* Tell me you don't believe me. */
-
-#define SARMODE0_GINT (0x1 << 30)
-#define SARMODE0_SHADEN (0x1 << 31)
-
-
-#define SARMODE1 0x60
-
-
-#define SARMODE1_TRTL_SHIFT 0 /* Program to 0 */
-#define SARMODE1_RRTL_SHIFT 4 /* Program to 0 */
-
-#define SARMODE1_TAGM (0x1 << 8) /* Program to 0 */
-
-#define SARMODE1_HECM0 (0x1 << 9)
-#define SARMODE1_HECM1 (0x1 << 10)
-#define SARMODE1_HECM2 (0x1 << 11)
-
-#define SARMODE1_GFCE (0x1 << 14)
-#define SARMODE1_GFCR (0x1 << 15)
-#define SARMODE1_PMS (0x1 << 18)
-#define SARMODE1_GPRI (0x1 << 19)
-#define SARMODE1_GPAS (0x1 << 20)
-#define SARMODE1_GVAS (0x1 << 21)
-#define SARMODE1_GNAM (0x1 << 22)
-#define SARMODE1_GPLEN (0x1 << 23)
-#define SARMODE1_DUMPE (0x1 << 24)
-#define SARMODE1_OAMCRC (0x1 << 25)
-#define SARMODE1_DCOAM (0x1 << 26)
-#define SARMODE1_DCRM (0x1 << 27)
-#define SARMODE1_TSTLP (0x1 << 28)
-#define SARMODE1_DEFHEC (0x1 << 29)
-
-
-#define ISR 0x64
-#define IUSR 0x68
-#define IMR 0x6c
-
-#define ISR_LPCO (0x1 << 0)
-#define ISR_DPCO (0x1 << 1)
-#define ISR_RBRQ0_W (0x1 << 2)
-#define ISR_RBRQ1_W (0x1 << 3)
-#define ISR_RBRQ2_W (0x1 << 4)
-#define ISR_RBRQ3_W (0x1 << 5)
-#define ISR_RBRQ0_NF (0x1 << 6)
-#define ISR_RBRQ1_NF (0x1 << 7)
-#define ISR_RBRQ2_NF (0x1 << 8)
-#define ISR_RBRQ3_NF (0x1 << 9)
-#define ISR_BFP_SC (0x1 << 10)
-#define ISR_INIT (0x1 << 11)
-#define ISR_INIT_ERR (0x1 << 12) /* Documented as "reserved" */
-#define ISR_USCEO (0x1 << 13)
-#define ISR_UPEC0 (0x1 << 14)
-#define ISR_VPFCO (0x1 << 15)
-#define ISR_CRCCO (0x1 << 16)
-#define ISR_HECO (0x1 << 17)
-#define ISR_TBRQ_W (0x1 << 18)
-#define ISR_TBRQ_NF (0x1 << 19)
-#define ISR_CTPQ_E (0x1 << 20)
-#define ISR_GFC_C0 (0x1 << 21)
-#define ISR_PCI_FTL (0x1 << 22)
-#define ISR_CSQ_W (0x1 << 23)
-#define ISR_CSQ_NF (0x1 << 24)
-#define ISR_EXT_INT (0x1 << 25)
-#define ISR_RXDMA_S (0x1 << 26)
-
-
-#define TMCONF 0x78
-/* Bits? */
-
-
-#define CALPRESCALE 0x7c
-/* Bits? */
-
-#define CELLOSCONF 0x84
-#define CELLOSCONF_COTS (0x1 << 28)
-#define CELLOSCONF_CEN (0x1 << 27)
-#define CELLOSCONF_SC8 (0x3 << 24)
-#define CELLOSCONF_SC4 (0x2 << 24)
-#define CELLOSCONF_SC2 (0x1 << 24)
-#define CELLOSCONF_SC1 (0x0 << 24)
-
-#define CELLOSCONF_COBS (0x1 << 16)
-#define CELLOSCONF_COPK (0x1 << 8)
-#define CELLOSCONF_COST (0x1 << 0)
-/* Bits? */
-
-#define RAS0 0x1bc
-#define RAS0_DCD_XHLT (0x1 << 31)
-
-#define RAS0_VPSEL (0x1 << 16)
-#define RAS0_VCSEL (0x1 << 0)
-
-#define RAS1 0x1c0
-#define RAS1_UTREG (0x1 << 5)
-
-
-#define DMAMR 0x1cc
-#define DMAMR_TX_MODE_FULL (0x0 << 0)
-#define DMAMR_TX_MODE_PART (0x1 << 0)
-#define DMAMR_TX_MODE_NONE (0x2 << 0) /* And 3 */
-
-
-
-#define RAS2 0x280
-
-#define RAS2_NNI (0x1 << 0)
-#define RAS2_USEL (0x1 << 1)
-#define RAS2_UBS (0x1 << 2)
-
-
-
-struct fs_transmit_config {
- u32 flags;
- u32 atm_hdr;
- u32 TMC[4];
- u32 spec;
- u32 rtag[3];
-};
-
-#define TC_FLAGS_AAL5 (0x0 << 29)
-#define TC_FLAGS_TRANSPARENT_PAYLOAD (0x1 << 29)
-#define TC_FLAGS_TRANSPARENT_CELL (0x2 << 29)
-#define TC_FLAGS_STREAMING (0x1 << 28)
-#define TC_FLAGS_PACKET (0x0)
-#define TC_FLAGS_TYPE_ABR (0x0 << 22)
-#define TC_FLAGS_TYPE_CBR (0x1 << 22)
-#define TC_FLAGS_TYPE_VBR (0x2 << 22)
-#define TC_FLAGS_TYPE_UBR (0x3 << 22)
-#define TC_FLAGS_CAL0 (0x0 << 20)
-#define TC_FLAGS_CAL1 (0x1 << 20)
-#define TC_FLAGS_CAL2 (0x2 << 20)
-#define TC_FLAGS_CAL3 (0x3 << 20)
-
-
-#define RC_FLAGS_NAM (0x1 << 13)
-#define RC_FLAGS_RXBM_PSB (0x0 << 14)
-#define RC_FLAGS_RXBM_CIF (0x1 << 14)
-#define RC_FLAGS_RXBM_PMB (0x2 << 14)
-#define RC_FLAGS_RXBM_STR (0x4 << 14)
-#define RC_FLAGS_RXBM_SAF (0x6 << 14)
-#define RC_FLAGS_RXBM_POS (0x6 << 14)
-#define RC_FLAGS_BFPS (0x1 << 17)
-
-#define RC_FLAGS_BFPS_BFP (0x1 << 17)
-
-#define RC_FLAGS_BFPS_BFP0 (0x0 << 17)
-#define RC_FLAGS_BFPS_BFP1 (0x1 << 17)
-#define RC_FLAGS_BFPS_BFP2 (0x2 << 17)
-#define RC_FLAGS_BFPS_BFP3 (0x3 << 17)
-#define RC_FLAGS_BFPS_BFP4 (0x4 << 17)
-#define RC_FLAGS_BFPS_BFP5 (0x5 << 17)
-#define RC_FLAGS_BFPS_BFP6 (0x6 << 17)
-#define RC_FLAGS_BFPS_BFP7 (0x7 << 17)
-#define RC_FLAGS_BFPS_BFP01 (0x8 << 17)
-#define RC_FLAGS_BFPS_BFP23 (0x9 << 17)
-#define RC_FLAGS_BFPS_BFP45 (0xa << 17)
-#define RC_FLAGS_BFPS_BFP67 (0xb << 17)
-#define RC_FLAGS_BFPS_BFP07 (0xc << 17)
-#define RC_FLAGS_BFPS_BFP27 (0xd << 17)
-#define RC_FLAGS_BFPS_BFP47 (0xe << 17)
-
-#define RC_FLAGS_BFPP (0x1 << 21)
-#define RC_FLAGS_TEVC (0x1 << 22)
-#define RC_FLAGS_TEP (0x1 << 23)
-#define RC_FLAGS_AAL5 (0x0 << 24)
-#define RC_FLAGS_TRANSP (0x1 << 24)
-#define RC_FLAGS_TRANSC (0x2 << 24)
-#define RC_FLAGS_ML (0x1 << 27)
-#define RC_FLAGS_TRBRM (0x1 << 28)
-#define RC_FLAGS_PRI (0x1 << 29)
-#define RC_FLAGS_HOAM (0x1 << 30)
-#define RC_FLAGS_CRC10 (0x1 << 31)
-
-
-#define RAC 0x1c8
-#define RAM 0x1c4
-
-
-
-/************************************************************************
- * Then the datastructures that the DRIVER uses. *
- ************************************************************************/
-
-#define TXQ_NENTRIES 32
-#define RXRQ_NENTRIES 1024
-
-
-struct fs_vcc {
- int channo;
- wait_queue_head_t close_wait;
- struct sk_buff *last_skb;
-};
-
-
-struct queue {
- struct FS_QENTRY *sa, *ea;
- int offset;
-};
-
-struct freepool {
- int offset;
- int bufsize;
- int nr_buffers;
- int n;
-};
-
-
-struct fs_dev {
- struct fs_dev *next; /* other FS devices */
- int flags;
-
- unsigned char irq; /* IRQ */
- struct pci_dev *pci_dev; /* PCI stuff */
- struct atm_dev *atm_dev;
- struct timer_list timer;
-
- unsigned long hw_base; /* mem base address */
- void __iomem *base; /* Mapping of base address */
- int channo;
- unsigned long channel_mask;
-
- struct queue hp_txq, lp_txq, tx_relq, st_q;
- struct freepool rx_fp[FS_NR_FREE_POOLS];
- struct queue rx_rq[FS_NR_RX_QUEUES];
-
- int nchannels;
- struct atm_vcc **atm_vccs;
- void *tx_inuse;
- int ntxpckts;
-};
-
-
-
-
-/* Number of channels that the FS50 supports. */
-#define FS50_CHANNEL_BITS 5
-#define FS50_NR_CHANNELS (1 << FS50_CHANNEL_BITS)
-
-
-#define FS_DEV(atm_dev) ((struct fs_dev *) (atm_dev)->dev_data)
-#define FS_VCC(atm_vcc) ((struct fs_vcc *) (atm_vcc)->dev_data)
-
-
-#define FS_IS50 0x1
-#define FS_IS155 0x2
-
-#define IS_FS50(dev) (dev->flags & FS_IS50)
-#define IS_FS155(dev) (dev->flags & FS_IS155)
-
-/* Within limits this is user-configurable. */
-/* Note: Currently the sum (10 -> 1k channels) is hardcoded in the driver. */
-#define FS155_VPI_BITS 4
-#define FS155_VCI_BITS 6
-
-#define FS155_CHANNEL_BITS (FS155_VPI_BITS + FS155_VCI_BITS)
-#define FS155_NR_CHANNELS (1 << FS155_CHANNEL_BITS)
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
deleted file mode 100644
index d0e67ec46216..000000000000
--- a/drivers/atm/horizon.c
+++ /dev/null
@@ -1,2853 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Madge Horizon ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/*
- IMPORTANT NOTE: Madge Networks no longer makes the adapters
- supported by this driver and makes no commitment to maintain it.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <asm/string.h>
-#include <asm/byteorder.h>
-
-#include "horizon.h"
-
-#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
-#define description_string "Madge ATM Horizon [Ultra] driver"
-#define version_string "1.2.1"
-
-static inline void __init show_version (void) {
- printk ("%s version %s\n", description_string, version_string);
-}
-
-/*
-
- CREDITS
-
- Driver and documentation by:
-
- Chris Aston Madge Networks
- Giuliano Procida Madge Networks
- Simon Benham Madge Networks
- Simon Johnson Madge Networks
- Various Others Madge Networks
-
- Some inspiration taken from other drivers by:
-
- Alexandru Cucos UTBv
- Kari Mettinen University of Helsinki
- Werner Almesberger EPFL LRC
-
- Theory of Operation
-
- I Hardware, detection, initialisation and shutdown.
-
- 1. Supported Hardware
-
- This driver should handle all variants of the PCI Madge ATM adapters
- with the Horizon chipset. These are all PCI cards supporting PIO, BM
- DMA and a form of MMIO (registers only, not internal RAM).
-
- The driver is only known to work with SONET and UTP Horizon Ultra
- cards at 155Mb/s. However, code is in place to deal with both the
- original Horizon and 25Mb/s operation.
-
- There are two revisions of the Horizon ASIC: the original and the
- Ultra. Details of hardware bugs are in section III.
-
- The ASIC version can be distinguished by chip markings but is NOT
- indicated by the PCI revision (all adapters seem to have PCI rev 1).
-
- I believe that:
-
- Horizon => Collage 25 PCI Adapter (UTP and STP)
- Horizon Ultra => Collage 155 PCI Client (UTP or SONET)
- Ambassador x => Collage 155 PCI Server (completely different)
-
- Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to
- have a Madge B154 plus glue logic serializer. I have also found a
- really ancient version of this with slightly different glue. It
- comes with the revision 0 (140-025-01) ASIC.
-
- Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink
- output (UTP) or an HP HFBR 5205 output (SONET). It has either
- Madge's SAMBA framer or a SUNI-lite device (early versions). It
- comes with the revision 1 (140-027-01) ASIC.
-
- 2. Detection
-
- All Horizon-based cards present with the same PCI Vendor and Device
- IDs. The standard Linux 2.2 PCI API is used to locate any cards and
- to enable bus-mastering (with appropriate latency).
-
- ATM_LAYER_STATUS in the control register distinguishes between the
- two possible physical layers (25 and 155). It is not clear whether
- the 155 cards can also operate at 25Mbps. We rely on the fact that a
- card operates at 155 if and only if it has the newer Horizon Ultra
- ASIC.
-
- For 155 cards the two possible framers are probed for and then set
- up for loop-timing.
-
- 3. Initialisation
-
- The card is reset and then put into a known state. The physical
- layer is configured for normal operation at the appropriate speed;
- in the case of the 155 cards, the framer is initialised with
- line-based timing; the internal RAM is zeroed and the allocation of
- buffers for RX and TX is made; the Burnt In Address is read and
- copied to the ATM ESI; various policy settings for RX (VPI bits,
- unknown VCs, oam cells) are made. Ideally all policy items should be
- configurable at module load (if not actually on-demand), however,
- only the vpi vs vci bit allocation can be specified at insmod.
-
- 4. Shutdown
-
- This is in response to module_cleanup. No VCs are in use and the card
- should be idle; it is reset.
-
- II Driver software (as it should be)
-
- 0. Traffic Parameters
-
- The traffic classes (not an enumeration) are currently: ATM_NONE (no
- traffic), ATM_UBR, ATM_CBR, ATM_VBR and ATM_ABR, ATM_ANYCLASS
- (compatible with everything). Together with (perhaps only some of)
- the following items they make up the traffic specification.
-
- struct atm_trafprm {
- unsigned char traffic_class; traffic class (ATM_UBR, ...)
- int max_pcr; maximum PCR in cells per second
- int pcr; desired PCR in cells per second
- int min_pcr; minimum PCR in cells per second
- int max_cdv; maximum CDV in microseconds
- int max_sdu; maximum SDU in bytes
- };
-
- Note that these denote bandwidth available not bandwidth used; the
- possibilities according to ATMF are:
-
- Real Time (cdv and max CDT given)
-
- CBR(pcr) pcr bandwidth always available
- rtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
-
- Non Real Time
-
- nrtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
- UBR()
- ABR(mcr,pcr) mcr bandwidth always available, up to pcr (depending) too
-
- mbs is max burst size (bucket)
- pcr and scr have associated cdvt values
- mcr is like scr but has no cdvt
- cdvt may differ at each hop
-
- Some of the above items are qos items (as opposed to traffic
- parameters). We have nothing to do with qos. All except ABR can have
- their traffic parameters converted to GCRA parameters. The GCRA may
- be implemented as a (real-number) leaky bucket. The GCRA can be used
- in complicated ways by switches and in simpler ways by end-stations.
- It can be used both to filter incoming cells and shape out-going
- cells.
-
- ATM Linux actually supports:
-
- ATM_NONE() (no traffic in this direction)
- ATM_UBR(max_frame_size)
- ATM_CBR(max/min_pcr, max_cdv, max_frame_size)
-
- 0 or ATM_MAX_PCR are used to indicate maximum available PCR
-
- A traffic specification consists of the AAL type and separate
- traffic specifications for either direction. In ATM Linux it is:
-
- struct atm_qos {
- struct atm_trafprm txtp;
- struct atm_trafprm rxtp;
- unsigned char aal;
- };
-
- AAL types are:
-
- ATM_NO_AAL AAL not specified
- ATM_AAL0 "raw" ATM cells
- ATM_AAL1 AAL1 (CBR)
- ATM_AAL2 AAL2 (VBR)
- ATM_AAL34 AAL3/4 (data)
- ATM_AAL5 AAL5 (data)
- ATM_SAAL signaling AAL
-
- The Horizon has support for AAL frame types: 0, 3/4 and 5. However,
- it does not implement AAL 3/4 SAR and it has a different notion of
- "raw cell" to ATM Linux's (48 bytes vs. 52 bytes) so neither are
- supported by this driver.
-
- The Horizon has limited support for ABR (including UBR), VBR and
- CBR. Each TX channel has a bucket (containing up to 31 cell units)
- and two timers (PCR and SCR) associated with it that can be used to
- govern cell emissions and host notification (in the case of ABR this
- is presumably so that RM cells may be emitted at appropriate times).
- The timers may either be disabled or may be set to any of 240 values
- (determined by the clock crystal, a fixed (?) per-device divider, a
- configurable divider and a configurable timer preload value).
-
- At the moment only UBR and CBR are supported by the driver. VBR will
- be supported as soon as ATM for Linux supports it. ABR support is
- very unlikely as RM cell handling is completely up to the driver.
-
- 1. TX (TX channel setup and TX transfer)
-
- The TX half of the driver owns the TX Horizon registers. The TX
- component in the IRQ handler is the BM completion handler. This can
- only be entered when tx_busy is true (enforced by hardware). The
- other TX component can only be entered when tx_busy is false
- (enforced by driver). So TX is single-threaded.
-
- Apart from a minor optimisation to not re-select the last channel,
- the TX send component works as follows:
-
- Atomic test and set tx_busy until we succeed; we should implement
- some sort of timeout so that tx_busy will never be stuck at true.
-
- If no TX channel is set up for this VC we wait for an idle one (if
- necessary) and set it up.
-
- At this point we have a TX channel ready for use. We wait for enough
- buffers to become available then start a TX transmit (set the TX
- descriptor, schedule transfer, exit).
-
- The IRQ component handles TX completion (stats, free buffer, tx_busy
- unset, exit). We also re-schedule further transfers for the same
- frame if needed.
-
- TX setup in more detail:
-
- TX open is a nop, the relevant information is held in the hrz_vcc
- (vcc->dev_data) structure and is "cached" on the card.
-
- TX close gets the TX lock and clears the channel from the "cache".
-
- 2. RX (Data Available and RX transfer)
-
- The RX half of the driver owns the RX registers. There are two RX
- components in the IRQ handler: the data available handler deals with
- fresh data that has arrived on the card, the BM completion handler
- is very similar to the TX completion handler. The data available
- handler grabs the rx_lock and it is only released once the data has
- been discarded or completely transferred to the host. The BM
- completion handler only runs when the lock is held; the data
- available handler is locked out over the same period.
-
- Data available on the card triggers an interrupt. If the data is not
- suitable for our existing RX channels or we cannot allocate a buffer
- it is flushed. Otherwise an RX receive is scheduled. Multiple RX
- transfers may be scheduled for the same frame.
-
- RX setup in more detail:
-
- RX open...
- RX close...
-
- III Hardware Bugs
-
- 0. Byte vs Word addressing of adapter RAM.
-
- A design feature; see the .h file (especially the memory map).
-
- 1. Bus Master Data Transfers (original Horizon only, fixed in Ultra)
-
- The host must not start a transmit direction transfer at a
- non-four-byte boundary in host memory. Instead the host should
- perform a one-byte, a two-byte, or a one-byte-followed-by-a-two-byte
- transfer in order to start the rest of the transfer on a four-byte
- boundary. RX is OK.
-
- Simultaneous transmit and receive direction bus master transfers are
- not allowed.
-
- The simplest solution to these two is to always do PIO (never DMA)
- in the TX direction on the original Horizon. More complicated
- solutions are likely to hurt my brain.
-
- 2. Loss of buffer on close VC
-
- When a VC is being closed, the buffer associated with it is not
- returned to the pool. The host must store the reference to this
- buffer and when opening a new VC then give it to that new VC.
-
- The host intervention currently consists of stacking such a buffer
- pointer at VC close and checking the stack at VC open.
-
- 3. Failure to close a VC
-
- If a VC is currently receiving a frame then closing the VC may fail
- and the frame continues to be received.
-
- The solution is to make sure any received frames are flushed when
- ready. This is currently done just before the solution to 2.
-
- 4. PCI bus (original Horizon only, fixed in Ultra)
-
- Reading from the data port prior to initialisation will hang the PCI
- bus. Just don't do that then! We don't.
-
- IV To Do List
-
- . Timer code may be broken.
-
- . Allow users to specify buffer allocation split for TX and RX.
-
- . Deal once and for all with buggy VC close.
-
- . Handle interrupted and/or non-blocking operations.
-
- . Change some macros to functions and move from .h to .c.
-
- . Try to limit the number of TX frames each VC may have queued, in
- order to reduce the chances of TX buffer exhaustion.
-
- . Implement VBR (bucket and timers not understood) and ABR (need to
- do RM cells manually); also no Linux support for either.
-
- . Implement QoS changes on open VCs (involves extracting parts of VC open
- and close into separate functions and using them to make changes).
-
-*/
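A minimal sketch, assuming only the struct atm_qos and struct atm_trafprm fields quoted in the comment above, of how a symmetric CBR/AAL5 traffic specification might be filled in. The helper name fill_cbr_qos is hypothetical and not part of this driver; ATM_CBR, ATM_AAL5 and ATM_MAX_PCR come from <linux/atm.h>.

#include <linux/atm.h>
#include <string.h>

/* Illustrative only: populate an atm_qos for a symmetric CBR/AAL5 VC.
 * Field names follow the structures quoted in the comment above. */
static void fill_cbr_qos(struct atm_qos *qos, int pcr, int max_sdu)
{
        memset(qos, 0, sizeof(*qos));
        qos->aal = ATM_AAL5;
        qos->txtp.traffic_class = ATM_CBR;
        qos->txtp.max_pcr = pcr;     /* cells per second; 0 or ATM_MAX_PCR means maximum available */
        qos->txtp.max_sdu = max_sdu; /* largest frame in bytes */
        qos->rxtp = qos->txtp;       /* same parameters in the receive direction */
}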
-
-/********** globals **********/
-
-static void do_housekeeping (struct timer_list *t);
-
-static unsigned short debug = 0;
-static unsigned short vpi_bits = 0;
-static int max_tx_size = 9000;
-static int max_rx_size = 9000;
-static unsigned char pci_lat = 0;
-
-/********** access functions **********/
-
-/* Read / Write Horizon registers */
-static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) {
- outl (cpu_to_le32 (data), dev->iobase + reg);
-}
-
-static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) {
- return le32_to_cpu (inl (dev->iobase + reg));
-}
-
-static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) {
- outw (cpu_to_le16 (data), dev->iobase + reg);
-}
-
-static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) {
- return le16_to_cpu (inw (dev->iobase + reg));
-}
-
-static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
- outsb (dev->iobase + reg, addr, len);
-}
-
-static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
- insb (dev->iobase + reg, addr, len);
-}
-
-/* Read / Write to a given address in Horizon buffer memory.
- Interrupts must be disabled between the address register and data
- port accesses as these must form an atomic operation. */
-static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) {
- // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr);
- wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
- wr_regl (dev, MEMORY_PORT_OFF, data);
-}
-
-static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) {
- // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr);
- wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
- return rd_regl (dev, MEMORY_PORT_OFF);
-}
-
-static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) {
- wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000);
- wr_regl (dev, MEMORY_PORT_OFF, data);
-}
-
-static inline u32 rd_framer (const hrz_dev * dev, u32 addr) {
- wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000);
- return rd_regl (dev, MEMORY_PORT_OFF);
-}
-
-/********** specialised access functions **********/
-
-/* RX */
-
-static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
- wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel);
- return;
-}
-
-static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
- while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
- ;
- return;
-}
-
-static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
- wr_regw (dev, RX_CHANNEL_PORT_OFF, channel);
- return;
-}
-
-static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
- while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
- ;
- return;
-}
-
-/* TX */
-
-static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) {
- wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel);
- return;
-}
-
-/* Update or query one configuration parameter of a particular channel. */
-
-static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) {
- wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
- chan * TX_CHANNEL_CONFIG_MULT | mode);
- wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value);
- return;
-}
-
-/********** dump functions **********/
-
-static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
-#ifdef DEBUG_HORIZON
- unsigned int i;
- unsigned char * data = skb->data;
- PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
- for (i=0; i<skb->len && i < 256;i++)
- PRINTDM (DBG_DATA, "%02x ", data[i]);
- PRINTDE (DBG_DATA,"");
-#else
- (void) prefix;
- (void) vc;
- (void) skb;
-#endif
- return;
-}
-
-static inline void dump_regs (hrz_dev * dev) {
-#ifdef DEBUG_HORIZON
- PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG));
- PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF));
- PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF));
- PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF));
- PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF));
- PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF));
-#else
- (void) dev;
-#endif
- return;
-}
-
-static inline void dump_framer (hrz_dev * dev) {
-#ifdef DEBUG_HORIZON
- unsigned int i;
- PRINTDB (DBG_REGS, "framer registers:");
- for (i = 0; i < 0x10; ++i)
- PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i));
- PRINTDE (DBG_REGS,"");
-#else
- (void) dev;
-#endif
- return;
-}
-
-/********** VPI/VCI <-> (RX) channel conversions **********/
-
-/* RX channels are 10-bit integers; these fns are quite paranoid */
-
-static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
- unsigned short vci_bits = 10 - vpi_bits;
- if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
- *channel = vpi<<vci_bits | vci;
- return *channel ? 0 : -EINVAL;
- }
- return -EINVAL;
-}
-
-/********** decode RX queue entries **********/
-
-static inline u16 rx_q_entry_to_length (u32 x) {
- return x & RX_Q_ENTRY_LENGTH_MASK;
-}
-
-static inline u16 rx_q_entry_to_rx_channel (u32 x) {
- return (x>>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK;
-}
-
-/* Cell Transmit Rate Values
- *
- * the cell transmit rate (cells per sec) can be set to a variety of
- * different values by specifying two parameters: a timer preload from
- * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of
- * an exponent from 0 to 14; the special value 15 disables the timer).
- *
- * cellrate = baserate / (preload * 2^divider)
- *
- * The maximum cell rate that can be specified is therefore just the
- * base rate. Halving the preload is equivalent to adding 1 to the
- * divider and so values 1 to 8 of the preload are redundant except
- * in the case of a maximal divider (14).
- *
- * Given a desired cell rate, an algorithm to determine the preload
- * and divider is:
- *
- * a) x = baserate / cellrate, want p * 2^d = x (as far as possible)
- * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done
- * if x <= 16 then set p = x, d = 0 (high rates), done
- * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to
- * know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until
- * we find the range (n will be between 1 and 14), set d = n
- * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n
- *
- * The algorithm used below is a minor variant of the above.
- *
- * The base rate is derived from the oscillator frequency (Hz) using a
- * fixed divider:
- *
- * baserate = freq / 32 in the case of some Unknown Card
- * baserate = freq / 8 in the case of the Horizon 25
- * baserate = freq / 8 in the case of the Horizon Ultra 155
- *
- * The Horizon cards have oscillators and base rates as follows:
- *
- * Card Oscillator Base Rate
- * Unknown Card 33 MHz 1.03125 MHz (33 MHz = PCI freq)
- * Horizon 25 32 MHz 4 MHz
- * Horizon Ultra 155 40 MHz 5 MHz
- *
- * The following defines give the base rates in Hz. These were
- * previously a factor of 100 larger, no doubt someone was using
- * cps*100.
- */
-
-#define BR_UKN 1031250l
-#define BR_HRZ 4000000l
-#define BR_ULT 5000000l
-
-// d is an exponent
-#define CR_MIND 0
-#define CR_MAXD 14
-
-// p ranges from 1 to a power of 2
-#define CR_MAXPEXP 4
-
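A rough, standalone illustration (under the stated assumptions, not the driver's make_rate() which follows and is more careful about overflow) of the preload/divider search described above, using round-to-nearest; the name pick_rate_bits is hypothetical. For example, with the Ultra base rate of 5000000 cells/s and a target of 100000 cells/s it yields p = 13, d = 2, i.e. an actual rate of about 96154 cells/s.

/* Find preload p (1..16) and divider exponent d (0..14) so that
 * p * 2^d approximates baserate / cellrate, i.e.
 * cellrate ~= baserate / (p << d).  No overflow guards. */
static int pick_rate_bits(unsigned long baserate, unsigned long cellrate,
                          unsigned int *preload, unsigned int *divider)
{
        unsigned int d;

        if (!cellrate)
                return -1;                      /* zero rate cannot be represented */

        for (d = 0; d <= 14; d++) {
                unsigned long step = cellrate << d;
                unsigned long p = (baserate + step / 2) / step; /* round to nearest */

                if (p < 1)
                        p = 1;
                if (p <= 16) {
                        *preload = p;           /* stored as p - 1 in the timer field */
                        *divider = d;
                        return 0;
                }
        }
        return -1;                              /* slower than p = 16, d = 14 allows */
}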
-static int make_rate (const hrz_dev * dev, u32 c, rounding r,
- u16 * bits, unsigned int * actual)
-{
- // note: rounding the rate down means rounding 'p' up
- const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ;
-
- u32 div = CR_MIND;
- u32 pre;
-
- // br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in
- // the tests below. We could think harder about exact possibilities
- // of failure...
-
- unsigned long br_man = br;
- unsigned int br_exp = 0;
-
- PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c,
- r == round_up ? "up" : r == round_down ? "down" : "nearest");
-
- // avoid div by zero
- if (!c) {
- PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!");
- return -EINVAL;
- }
-
- while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) {
- br_man = br_man >> 1;
- ++br_exp;
- }
- // (br >>br_exp) <<br_exp == br and
- // br_exp <= CR_MAXPEXP+CR_MIND
-
- if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) {
- // Equivalent to: B <= (c << (MAXPEXP+MIND))
- // take care of rounding
- switch (r) {
- case round_down:
- pre = DIV_ROUND_UP(br, c<<div);
- // but p must be non-zero
- if (!pre)
- pre = 1;
- break;
- case round_nearest:
- pre = DIV_ROUND_CLOSEST(br, c<<div);
- // but p must be non-zero
- if (!pre)
- pre = 1;
- break;
- default: /* round_up */
- pre = br/(c<<div);
- // but p must be non-zero
- if (!pre)
- return -EINVAL;
- }
- PRINTD (DBG_QOS, "A: p=%u, d=%u", pre, div);
- goto got_it;
- }
-
- // at this point we have
- // d == MIND and (c << (MAXPEXP+MIND)) < B
- while (div < CR_MAXD) {
- div++;
- if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) {
- // Equivalent to: B <= (c << (MAXPEXP+d))
- // c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d)
- // 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP
- // MAXP/2 < B/(c * 2^d) <= MAXP
- // take care of rounding
- switch (r) {
- case round_down:
- pre = DIV_ROUND_UP(br, c<<div);
- break;
- case round_nearest:
- pre = DIV_ROUND_CLOSEST(br, c<<div);
- break;
- default: /* round_up */
- pre = br/(c<<div);
- }
- PRINTD (DBG_QOS, "B: p=%u, d=%u", pre, div);
- goto got_it;
- }
- }
- // at this point we have
- // d == MAXD and (c << (MAXPEXP+MAXD)) < B
- // but we cannot go any higher
- // take care of rounding
- if (r == round_down)
- return -EINVAL;
- pre = 1 << CR_MAXPEXP;
- PRINTD (DBG_QOS, "C: p=%u, d=%u", pre, div);
-got_it:
- // paranoia
- if (div > CR_MAXD || (!pre) || pre > 1<<CR_MAXPEXP) {
- PRINTD (DBG_QOS, "set_cr internal failure: d=%u p=%u",
- div, pre);
- return -EINVAL;
- } else {
- if (bits)
- *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
- if (actual) {
- *actual = DIV_ROUND_UP(br, pre<<div);
- PRINTD (DBG_QOS, "actual rate: %u", *actual);
- }
- return 0;
- }
-}
-
-static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding r, unsigned int tol,
- u16 * bit_pattern, unsigned int * actual) {
- unsigned int my_actual;
-
- PRINTD (DBG_QOS|DBG_FLOW, "make_rate_with_tolerance c=%u, %s, tol=%u",
- c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol);
-
- if (!actual)
- // actual rate is not returned
- actual = &my_actual;
-
- if (make_rate (dev, c, round_nearest, bit_pattern, actual))
- // should never happen as round_nearest always succeeds
- return -1;
-
- if (c - tol <= *actual && *actual <= c + tol)
- // within tolerance
- return 0;
- else
- // intolerant, try rounding instead
- return make_rate (dev, c, r, bit_pattern, actual);
-}
-
-/********** Listen on a VC **********/
-
-static int hrz_open_rx (hrz_dev * dev, u16 channel) {
- // is there any guarantee that we don't get two simultaneous
- // identical calls of this function from different processes? yes
- // rate_lock
- unsigned long flags;
- u32 channel_type; // u16?
-
- u16 buf_ptr = RX_CHANNEL_IDLE;
-
- rx_ch_desc * rx_desc = &memmap->rx_descs[channel];
-
- PRINTD (DBG_FLOW, "hrz_open_rx %x", channel);
-
- spin_lock_irqsave (&dev->mem_lock, flags);
- channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- // very serious error, should never occur
- if (channel_type != RX_CHANNEL_DISABLED) {
- PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open");
- return -EBUSY; // clean up?
- }
-
- // Give back spare buffer
- if (dev->noof_spare_buffers) {
- buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers];
- PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr);
- // should never occur
- if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) {
- // but easy to recover from
- PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE");
- buf_ptr = RX_CHANNEL_IDLE;
- }
- } else {
- PRINTD (DBG_VCC, "using IDLE buffer pointer");
- }
-
- // Channel is currently disabled so change its status to idle
-
- // do we really need to save the flags again?
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- wr_mem (dev, &rx_desc->wr_buf_type,
- buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME);
- if (buf_ptr != RX_CHANNEL_IDLE)
- wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr);
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- // rxer->rate = make_rate (qos->peak_cells);
-
- PRINTD (DBG_FLOW, "hrz_open_rx ok");
-
- return 0;
-}
-
-#if 0
-/********** change vc rate for a given vc **********/
-
-static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
- rxer->rate = make_rate (qos->peak_cells);
-}
-#endif
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-static void hrz_kfree_skb (struct sk_buff * skb) {
- if (ATM_SKB(skb)->vcc->pop) {
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- } else {
- dev_kfree_skb_any (skb);
- }
-}
-
-/********** cancel listen on a VC **********/
-
-static void hrz_close_rx (hrz_dev * dev, u16 vc) {
- unsigned long flags;
-
- u32 value;
-
- u32 r1, r2;
-
- rx_ch_desc * rx_desc = &memmap->rx_descs[vc];
-
- int was_idle = 0;
-
- spin_lock_irqsave (&dev->mem_lock, flags);
- value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- if (value == RX_CHANNEL_DISABLED) {
- // I suppose this could happen once we deal with _NONE traffic properly
- PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc);
- return;
- }
- if (value == RX_CHANNEL_IDLE)
- was_idle = 1;
-
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- for (;;) {
- wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED);
-
- if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED)
- break;
-
- was_idle = 0;
- }
-
- if (was_idle) {
- spin_unlock_irqrestore (&dev->mem_lock, flags);
- return;
- }
-
- WAIT_FLUSH_RX_COMPLETE(dev);
-
- // XXX Is this all really necessary? We can rely on the rx_data_av
- // handler to discard frames that remain queued for delivery. If the
- // worry is that immediately reopening the channel (perhaps by a
- // different process) may cause some data to be mis-delivered then
- // there may still be a simpler solution (such as busy-waiting on
- // rx_busy once the channel is disabled or before a new one is
- // opened - does this leave any holes?). Arguably setting up and
- // tearing down the TX and RX halves of each virtual circuit could
- // most safely be done within ?x_busy protected regions.
-
- // OK, current changes are that Simon's marker is disabled and we DO
- // look for NULL rxer elsewhere. The code here seems to flush frames
- // and then remember the last dead cell belonging to the channel
- // just disabled - the cell gets relinked at the next vc_open.
- // However, when all VCs are closed or only a few opened there are a
- // handful of buffers that are unusable.
-
- // Does anyone feel like documenting spare_buffers properly?
- // Does anyone feel like fixing this in a nicer way?
-
- // Flush any data which is left in the channel
- for (;;) {
- // Change the rx channel port to something different to the RX
- // channel we are trying to close to force Horizon to flush the rx
- // channel read and write pointers.
-
- u16 other = vc^(RX_CHANS/2);
-
- SELECT_RX_CHANNEL (dev, other);
- WAIT_UPDATE_COMPLETE (dev);
-
- r1 = rd_mem (dev, &rx_desc->rd_buf_type);
-
- // Select this RX channel. Flush doesn't seem to work unless we
- // select an RX channel beforehand
-
- SELECT_RX_CHANNEL (dev, vc);
- WAIT_UPDATE_COMPLETE (dev);
-
- // Attempt to flush a frame on this RX channel
-
- FLUSH_RX_CHANNEL (dev, vc);
- WAIT_FLUSH_RX_COMPLETE (dev);
-
- // Force Horizon to flush rx channel read and write pointers as before
-
- SELECT_RX_CHANNEL (dev, other);
- WAIT_UPDATE_COMPLETE (dev);
-
- r2 = rd_mem (dev, &rx_desc->rd_buf_type);
-
- PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);
-
- if (r1 == r2) {
- dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
- break;
- }
- }
-
-#if 0
- {
- rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)];
- rx_q_entry * rd_ptr = dev->rx_q_entry;
-
- PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %u, wr_ptr = %u", rd_ptr, wr_ptr);
-
- while (rd_ptr != wr_ptr) {
- u32 x = rd_mem (dev, (HDW *) rd_ptr);
-
- if (vc == rx_q_entry_to_rx_channel (x)) {
- x |= SIMONS_DODGEY_MARKER;
-
- PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey");
-
- wr_mem (dev, (HDW *) rd_ptr, x);
- }
-
- if (rd_ptr == dev->rx_q_wrap)
- rd_ptr = dev->rx_q_reset;
- else
- rd_ptr++;
- }
- }
-#endif
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- return;
-}
-
-/********** schedule RX transfers **********/
-
-// Note on tail recursion: a GCC developer said that it is not likely
- // to be fixed soon, so do not define TAILRECURSIONWORKS unless you
-// are sure it does as you may otherwise overflow the kernel stack.
-
-// giving this fn a return value would help GCC, allegedly
-
-static void rx_schedule (hrz_dev * dev, int irq) {
- unsigned int rx_bytes;
-
- int pio_instead = 0;
-#ifndef TAILRECURSIONWORKS
- pio_instead = 1;
- while (pio_instead) {
-#endif
- // bytes waiting for RX transfer
- rx_bytes = dev->rx_bytes;
-
-#if 0
- spin_count = 0;
- while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) {
- PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!");
- if (++spin_count > 10) {
- PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion");
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- clear_bit (rx_busy, &dev->flags);
- hrz_kfree_skb (dev->rx_skb);
- return;
- }
- }
-#endif
-
- // this code follows the TX code but (at the moment) there is only
- // one region - the skb itself. I don't know if this will change,
- // but it doesn't hurt to have the code here, disabled.
-
- if (rx_bytes) {
- // start next transfer within same region
- if (rx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (rx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
- dev->rx_bytes = 0;
- } else {
- PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
- dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
- rx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // rx_bytes == 0 -- we're between regions
- // regions remaining to transfer
-#if 0
- unsigned int rx_regions = dev->rx_regions;
-#else
- unsigned int rx_regions = 0;
-#endif
-
- if (rx_regions) {
-#if 0
- // start a new region
- dev->rx_addr = dev->rx_iovec->iov_base;
- rx_bytes = dev->rx_iovec->iov_len;
- ++dev->rx_iovec;
- dev->rx_regions = rx_regions - 1;
-
- if (rx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (rx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(full region)");
- dev->rx_bytes = 0;
- } else {
- PRINTD (DBG_RX|DBG_BUS, "(start multi region)");
- dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
- rx_bytes = MAX_TRANSFER_COUNT;
- }
-#endif
- } else {
- // rx_regions == 0
- // that's all folks - end of frame
- struct sk_buff * skb = dev->rx_skb;
- // dev->rx_iovec = 0;
-
- FLUSH_RX_CHANNEL (dev, dev->rx_channel);
-
- dump_skb ("<<<", dev->rx_channel, skb);
-
- PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);
-
- {
- struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
- // VC layer stats
- atomic_inc(&vcc->stats->rx);
- __net_timestamp(skb);
- // end of our responsibility
- vcc->push (vcc, skb);
- }
- }
- }
-
- // note: writing RX_COUNT clears any interrupt condition
- if (rx_bytes) {
- if (pio_instead) {
- if (irq)
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
- } else {
- wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
- }
- dev->rx_addr += rx_bytes;
- } else {
- if (irq)
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- // allow another RX thread to start
- YELLOW_LED_ON(dev);
- clear_bit (rx_busy, &dev->flags);
- PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
- }
-
-#ifdef TAILRECURSIONWORKS
- // and we all bless optimised tail calls
- if (pio_instead)
- return rx_schedule (dev, 0);
- return;
-#else
- // grrrrrrr!
- irq = 0;
- }
- return;
-#endif
-}
-
-/********** handle RX bus master complete events **********/
-
-static void rx_bus_master_complete_handler (hrz_dev * dev) {
- if (test_bit (rx_busy, &dev->flags)) {
- rx_schedule (dev, 1);
- } else {
- PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
- // clear interrupt condition on adapter
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- }
- return;
-}
-
-/********** (queue to) become the next TX thread **********/
-
-static int tx_hold (hrz_dev * dev) {
- PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
- wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
- PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
- if (signal_pending (current))
- return -1;
- PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
- return 0;
-}
-
-/********** allow another TX thread to start **********/
-
-static inline void tx_release (hrz_dev * dev) {
- clear_bit (tx_busy, &dev->flags);
- PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
- wake_up_interruptible (&dev->tx_queue);
-}
-
-/********** schedule TX transfers **********/
-
-static void tx_schedule (hrz_dev * const dev, int irq) {
- unsigned int tx_bytes;
-
- int append_desc = 0;
-
- int pio_instead = 0;
-#ifndef TAILRECURSIONWORKS
- pio_instead = 1;
- while (pio_instead) {
-#endif
- // bytes in current region waiting for TX transfer
- tx_bytes = dev->tx_bytes;
-
-#if 0
- spin_count = 0;
- while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) {
- PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!");
- if (++spin_count > 10) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion");
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- tx_release (dev);
- hrz_kfree_skb (dev->tx_skb);
- return;
- }
- }
-#endif
-
- if (tx_bytes) {
- // start next transfer within same region
- if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (tx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
- if (!dev->tx_iovec) {
- // end of last region
- append_desc = 1;
- }
- dev->tx_bytes = 0;
- } else {
- PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
- dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
- tx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // tx_bytes == 0 -- we're between regions
- // regions remaining to transfer
- unsigned int tx_regions = dev->tx_regions;
-
- if (tx_regions) {
- // start a new region
- dev->tx_addr = dev->tx_iovec->iov_base;
- tx_bytes = dev->tx_iovec->iov_len;
- ++dev->tx_iovec;
- dev->tx_regions = tx_regions - 1;
-
- if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (tx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(full region)");
- dev->tx_bytes = 0;
- } else {
- PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
- dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
- tx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // tx_regions == 0
- // that's all folks - end of frame
- struct sk_buff * skb = dev->tx_skb;
- dev->tx_iovec = NULL;
-
- // VC layer stats
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- // free the skb
- hrz_kfree_skb (skb);
- }
- }
-
- // note: writing TX_COUNT clears any interrupt condition
- if (tx_bytes) {
- if (pio_instead) {
- if (irq)
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
- if (append_desc)
- wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
- } else {
- wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
- if (append_desc)
- wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
- append_desc
- ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
- : tx_bytes);
- }
- dev->tx_addr += tx_bytes;
- } else {
- if (irq)
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- YELLOW_LED_ON(dev);
- tx_release (dev);
- }
-
-#ifdef TAILRECURSIONWORKS
- // and we all bless optimised tail calls
- if (pio_instead)
- return tx_schedule (dev, 0);
- return;
-#else
- // grrrrrrr!
- irq = 0;
- }
- return;
-#endif
-}
-
-/********** handle TX bus master complete events **********/
-
-static void tx_bus_master_complete_handler (hrz_dev * dev) {
- if (test_bit (tx_busy, &dev->flags)) {
- tx_schedule (dev, 1);
- } else {
- PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion");
- // clear interrupt condition on adapter
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- }
- return;
-}
-
-/********** move RX Q pointer to next item in circular buffer **********/
-
-// called only from IRQ sub-handler
-static u32 rx_queue_entry_next (hrz_dev * dev) {
- u32 rx_queue_entry;
- spin_lock (&dev->mem_lock);
- rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
- if (dev->rx_q_entry == dev->rx_q_wrap)
- dev->rx_q_entry = dev->rx_q_reset;
- else
- dev->rx_q_entry++;
- wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset);
- spin_unlock (&dev->mem_lock);
- return rx_queue_entry;
-}
-
-/********** handle RX data received by device **********/
-
-// called from IRQ handler
-static void rx_data_av_handler (hrz_dev * dev) {
- u32 rx_queue_entry;
- u32 rx_queue_entry_flags;
- u16 rx_len;
- u16 rx_channel;
-
- PRINTD (DBG_FLOW, "rx_data_av_handler");
-
- // try to grab rx lock (not possible during RX bus mastering)
- if (test_and_set_bit (rx_busy, &dev->flags)) {
- PRINTD (DBG_RX, "locked out of rx lock");
- return;
- }
- PRINTD (DBG_RX, "set rx_busy for dev %p", dev);
- // lock is cleared if we fail now, o/w after bus master completion
-
- YELLOW_LED_OFF(dev);
-
- rx_queue_entry = rx_queue_entry_next (dev);
-
- rx_len = rx_q_entry_to_length (rx_queue_entry);
- rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry);
-
- WAIT_FLUSH_RX_COMPLETE (dev);
-
- SELECT_RX_CHANNEL (dev, rx_channel);
-
- PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry);
- rx_queue_entry_flags = rx_queue_entry & (RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER);
-
- if (!rx_len) {
- // (at least) bus-mastering breaks if we try to handle a
- // zero-length frame, besides AAL5 does not support them
- PRINTK (KERN_ERR, "zero-length frame!");
- rx_queue_entry_flags &= ~RX_COMPLETE_FRAME;
- }
-
- if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) {
- PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!");
- }
- if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) {
- struct atm_vcc * atm_vcc;
-
- PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len);
-
- atm_vcc = dev->rxer[rx_channel];
- // if no vcc is assigned to this channel, we should drop the frame
- // (is this what SIMONS etc. was trying to achieve?)
-
- if (atm_vcc) {
-
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
-
- if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
-
- struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
- if (skb) {
- // remember this so we can push it later
- dev->rx_skb = skb;
- // remember this so we can flush it later
- dev->rx_channel = rx_channel;
-
- // prepare socket buffer
- skb_put (skb, rx_len);
- ATM_SKB(skb)->vcc = atm_vcc;
-
- // simple transfer
- // dev->rx_regions = 0;
- // dev->rx_iovec = 0;
- dev->rx_bytes = rx_len;
- dev->rx_addr = skb->data;
- PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)",
- skb->data, rx_len);
-
- // do the business
- rx_schedule (dev, 0);
- return;
-
- } else {
- PRINTD (DBG_SKB|DBG_WARN, "failed to get skb");
- }
-
- } else {
- PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel);
- // do we count this?
- }
-
- } else {
- PRINTK (KERN_WARNING, "dropped over-size frame");
- // do we count this?
- }
-
- } else {
- PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)");
- // do we count this?
- }
-
- } else {
- // Wait update complete ? SPONG
- }
-
- // RX was aborted
- YELLOW_LED_ON(dev);
-
- FLUSH_RX_CHANNEL (dev,rx_channel);
- clear_bit (rx_busy, &dev->flags);
-
- return;
-}
-
-/********** interrupt handler **********/
-
-static irqreturn_t interrupt_handler(int irq, void *dev_id)
-{
- hrz_dev *dev = dev_id;
- u32 int_source;
- unsigned int irq_ok;
-
- PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id);
-
- // definitely for us
- irq_ok = 0;
- while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
- & INTERESTING_INTERRUPTS)) {
- // In the interests of fairness, the handlers below are
- // called in sequence and without immediate return to the head of
- // the while loop. This is only an issue for slow hosts (or when
- // debugging messages are on). Really slow hosts may find a fast
- // sender keeps them permanently in the IRQ handler. :(
-
- // (only an issue for slow hosts) RX completion goes before
- // rx_data_av as the former implies rx_busy and so the latter
- // would just abort. If it reschedules another transfer
- // (continuing the same frame) then it will not clear rx_busy.
-
- // (only an issue for slow hosts) TX completion goes before RX
- // data available as it is a much shorter routine - there is the
- // chance that any further transfers it schedules will be complete
- // by the time of the return to the head of the while loop
-
- if (int_source & RX_BUS_MASTER_COMPLETE) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted");
- rx_bus_master_complete_handler (dev);
- }
- if (int_source & TX_BUS_MASTER_COMPLETE) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted");
- tx_bus_master_complete_handler (dev);
- }
- if (int_source & RX_DATA_AV) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted");
- rx_data_av_handler (dev);
- }
- }
- if (irq_ok) {
- PRINTD (DBG_IRQ, "work done: %u", irq_ok);
- } else {
- PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source);
- }
-
- PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
- if (irq_ok)
- return IRQ_HANDLED;
- return IRQ_NONE;
-}
-
-/********** housekeeping **********/
-
-static void do_housekeeping (struct timer_list *t) {
- // just stats at the moment
- hrz_dev * dev = from_timer(dev, t, housekeeping);
-
- // collect device-specific (not driver/atm-linux) stats here
- dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
- dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF);
- dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF);
- dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF);
-
- mod_timer (&dev->housekeeping, jiffies + HZ/10);
-
- return;
-}
-
-/********** find an idle channel for TX and set it up **********/
-
-// called with tx_busy set
-static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
- unsigned short idle_channels;
- short tx_channel = -1;
- unsigned int spin_count;
- PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev);
-
- // better would be to fail immediately; the caller can then decide whether
- // to wait or drop (depending on whether this is UBR etc.)
- spin_count = 0;
- while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) {
- PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel");
- // delay a bit here
- if (++spin_count > 100) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel");
- return -EBUSY;
- }
- }
-
- // got an idle channel
- {
- // tx_idle ensures we look for idle channels in RR order
- int chan = dev->tx_idle;
-
- int keep_going = 1;
- while (keep_going) {
- if (idle_channels & (1<<chan)) {
- tx_channel = chan;
- keep_going = 0;
- }
- ++chan;
- if (chan == TX_CHANS)
- chan = 0;
- }
-
- dev->tx_idle = chan;
- }
-
- // set up the channel we found
- {
- // Initialise the cell header in the transmit channel descriptor
- // a.k.a. prepare the channel and remember that we have done so.
-
- tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
- u32 rd_ptr;
- u32 wr_ptr;
- u16 channel = vcc->channel;
-
- unsigned long flags;
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- // Update the transmit channel record.
- dev->tx_channel_record[tx_channel] = channel;
-
- // xBR channel
- update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS,
- vcc->tx_xbr_bits);
-
- // Update the PCR counter preload value etc.
- update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS,
- vcc->tx_pcr_bits);
-
-#if 0
- if (vcc->tx_xbr_bits == VBR_RATE_TYPE) {
- // SCR timer
- update_tx_channel_config (dev, tx_channel, SCR_TIMER_ACCESS,
- vcc->tx_scr_bits);
-
- // Bucket size...
- update_tx_channel_config (dev, tx_channel, BUCKET_CAPACITY_ACCESS,
- vcc->tx_bucket_bits);
-
- // ... and fullness
- update_tx_channel_config (dev, tx_channel, BUCKET_FULLNESS_ACCESS,
- vcc->tx_bucket_bits);
- }
-#endif
-
- // Initialise the read and write buffer pointers
- rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK;
- wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK;
-
- // idle TX channels should have identical pointers
- if (rd_ptr != wr_ptr) {
- PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!");
- // spin_unlock... return -E...
- // I wonder if gcc would get rid of one of the pointer aliases
- }
- PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.",
- rd_ptr, wr_ptr);
-
- switch (vcc->aal) {
- case aal0:
- PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0");
- rd_ptr |= CHANNEL_TYPE_RAW_CELLS;
- wr_ptr |= CHANNEL_TYPE_RAW_CELLS;
- break;
- case aal34:
- PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34");
- rd_ptr |= CHANNEL_TYPE_AAL3_4;
- wr_ptr |= CHANNEL_TYPE_AAL3_4;
- break;
- case aal5:
- rd_ptr |= CHANNEL_TYPE_AAL5;
- wr_ptr |= CHANNEL_TYPE_AAL5;
- // Initialise the CRC
- wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC);
- break;
- }
-
- wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr);
- wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr);
-
- // Write the Cell Header
- // Payload Type, CLP and GFC would go here if non-zero
- wr_mem (dev, &tx_desc->cell_header, channel);
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
- }
-
- return tx_channel;
-}
-
-/********** send a frame **********/
-
-static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- unsigned int spin_count;
- int free_buffers;
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc * vcc = HRZ_VCC(atm_vcc);
- u16 channel = vcc->channel;
-
- u32 buffers_required;
-
- /* signed for error return */
- short tx_channel;
-
- PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u",
- channel, skb->data, skb->len);
-
- dump_skb (">>>", channel, skb);
-
- if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) {
- PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel);
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
- // don't understand this
- ATM_SKB(skb)->vcc = atm_vcc;
-
- if (skb->len > atm_vcc->qos.txtp.max_sdu) {
- PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
- if (!channel) {
- PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel");
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
-#if 0
- {
- // where would be a better place for this? housekeeping?
- u16 status;
- pci_read_config_word (dev->pci_dev, PCI_STATUS, &status);
- if (status & PCI_STATUS_REC_MASTER_ABORT) {
- PRINTD (DBG_BUS|DBG_ERR, "Clearing PCI Master Abort (and cleaning up)");
- status &= ~PCI_STATUS_REC_MASTER_ABORT;
- pci_write_config_word (dev->pci_dev, PCI_STATUS, status);
- if (test_bit (tx_busy, &dev->flags)) {
- hrz_kfree_skb (dev->tx_skb);
- tx_release (dev);
- }
- }
- }
-#endif
-
-#ifdef DEBUG_HORIZON
- /* wey-hey! */
- if (channel == 1023) {
- unsigned int i;
- unsigned short d = 0;
- char * s = skb->data;
- if (*s++ == 'D') {
- for (i = 0; i < 4; ++i)
- d = (d << 4) | hex_to_bin(*s++);
- PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
- }
- }
-#endif
-
- // wait until TX is free and grab lock
- if (tx_hold (dev)) {
- hrz_kfree_skb (skb);
- return -ERESTARTSYS;
- }
-
- // Wait for enough space to be available in transmit buffer memory.
-
- // should be number of cells needed + 2 (according to hardware docs)
- // = ((framelen+8)+47) / 48 + 2
- // = (framelen+7) / 48 + 3, hmm... faster to put addition inside XXX
- buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3;
-
- // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry)
- spin_count = 0;
- while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) {
- PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d",
- free_buffers, buffers_required);
- // what is the appropriate delay? implement a timeout? (depending on line speed?)
- // mdelay (1);
- // what happens if we kill (current_pid, SIGKILL) ?
- schedule();
- if (++spin_count > 1000) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d",
- free_buffers, buffers_required);
- tx_release (dev);
- hrz_kfree_skb (skb);
- return -ERESTARTSYS;
- }
- }
-
- // Select a channel to transmit the frame on.
- if (channel == dev->last_vc) {
- PRINTD (DBG_TX, "last vc hack: hit");
- tx_channel = dev->tx_last;
- } else {
- PRINTD (DBG_TX, "last vc hack: miss");
- // Are we currently transmitting this VC on one of the channels?
- for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel)
- if (dev->tx_channel_record[tx_channel] == channel) {
- PRINTD (DBG_TX, "vc already on channel: hit");
- break;
- }
- if (tx_channel == TX_CHANS) {
- PRINTD (DBG_TX, "vc already on channel: miss");
- // Find and set up an idle channel.
- tx_channel = setup_idle_tx_channel (dev, vcc);
- if (tx_channel < 0) {
- PRINTD (DBG_TX|DBG_ERR, "failed to get channel");
- tx_release (dev);
- return tx_channel;
- }
- }
-
- PRINTD (DBG_TX, "got channel");
- SELECT_TX_CHANNEL(dev, tx_channel);
-
- dev->last_vc = channel;
- dev->tx_last = tx_channel;
- }
-
- PRINTD (DBG_TX, "using channel %u", tx_channel);
-
- YELLOW_LED_OFF(dev);
-
- // TX start transfer
-
- {
- unsigned int tx_len = skb->len;
- unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags;
- // remember this so we can free it later
- dev->tx_skb = skb;
-
- if (tx_iovcnt) {
- // scatter gather transfer
- dev->tx_regions = tx_iovcnt;
- dev->tx_iovec = NULL; /* @@@ needs rewriting */
- dev->tx_bytes = 0;
- PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)",
- skb->data, tx_len);
- tx_release (dev);
- hrz_kfree_skb (skb);
- return -EIO;
- } else {
- // simple transfer
- dev->tx_regions = 0;
- dev->tx_iovec = NULL;
- dev->tx_bytes = tx_len;
- dev->tx_addr = skb->data;
- PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)",
- skb->data, tx_len);
- }
-
- // and do the business
- tx_schedule (dev, 0);
-
- }
-
- return 0;
-}
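The buffer accounting in hrz_send() above relies on the identity ((len + 8) + 47)/48 + 2 == (len + 7)/48 + 3 noted in the comment. The following standalone, host-side sketch (not driver code; it uses local stand-ins for ATM_CELL_PAYLOAD = 48 and ATM_AAL5_TRAILER = 8) checks the identity and shows the count for a sample frame:

#include <assert.h>
#include <stdio.h>

#define CELL_PAYLOAD 48 /* stand-in for ATM_CELL_PAYLOAD */
#define AAL5_TRAILER 8  /* stand-in for ATM_AAL5_TRAILER */

/* cells needed for the PDU plus the two cells of slack the hardware wants */
static unsigned int cells_long_form(unsigned int len)
{
    return (len + AAL5_TRAILER + CELL_PAYLOAD - 1) / CELL_PAYLOAD + 2;
}

/* the equivalent form actually used by hrz_send(): (len + 7) / 48 + 3 */
static unsigned int cells_driver_form(unsigned int len)
{
    return (len + AAL5_TRAILER - 1) / CELL_PAYLOAD + 3;
}

int main(void)
{
    unsigned int len;

    for (len = 0; len <= 9112; len++) /* up to TX_AAL5_LIMIT */
        assert(cells_long_form(len) == cells_driver_form(len));

    printf("a 64-byte frame needs %u buffers\n", cells_driver_form(64)); /* 4 */
    return 0;
}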
-
-/********** reset a card **********/
-
-static void hrz_reset (const hrz_dev * dev) {
- u32 control_0_reg = rd_regl (dev, CONTROL_0_REG);
-
- // why not set RESET_HORIZON to one and wait for the card to
- // reassert that bit as zero? Like so:
- control_0_reg = control_0_reg & RESET_HORIZON;
- wr_regl (dev, CONTROL_0_REG, control_0_reg);
- while (control_0_reg & RESET_HORIZON)
- control_0_reg = rd_regl (dev, CONTROL_0_REG);
-
- // old reset code retained:
- wr_regl (dev, CONTROL_0_REG, control_0_reg |
- RESET_ATM | RESET_RX | RESET_TX | RESET_HOST);
- // just guessing here
- udelay (1000);
-
- wr_regl (dev, CONTROL_0_REG, control_0_reg);
-}
-
-/********** read the burnt in address **********/
-
-static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
-{
- wr_regl (dev, CONTROL_0_REG, ctrl);
- udelay (5);
-}
-
-static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
-{
- // DI must be valid around rising SK edge
- WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);
- WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
-}
-
-static u16 read_bia(const hrz_dev *dev, u16 addr)
-{
- u32 ctrl = rd_regl (dev, CONTROL_0_REG);
-
- const unsigned int addr_bits = 6;
- const unsigned int data_bits = 16;
-
- unsigned int i;
-
- u16 res;
-
- ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI);
- WRITE_IT_WAIT(dev, ctrl);
-
- // wake Serial EEPROM and send 110 (READ) command
- ctrl |= (SEEPROM_CS | SEEPROM_DI);
- CLOCK_IT(dev, ctrl);
-
- ctrl |= SEEPROM_DI;
- CLOCK_IT(dev, ctrl);
-
- ctrl &= ~SEEPROM_DI;
- CLOCK_IT(dev, ctrl);
-
- for (i=0; i<addr_bits; i++) {
- if (addr & (1 << (addr_bits-1)))
- ctrl |= SEEPROM_DI;
- else
- ctrl &= ~SEEPROM_DI;
-
- CLOCK_IT(dev, ctrl);
-
- addr = addr << 1;
- }
-
- // we could check that we have DO = 0 here
- ctrl &= ~SEEPROM_DI;
-
- res = 0;
- for (i=0;i<data_bits;i++) {
- res = res >> 1;
-
- CLOCK_IT(dev, ctrl);
-
- if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO)
- res |= (1 << (data_bits-1));
- }
-
- ctrl &= ~(SEEPROM_SK | SEEPROM_CS);
- WRITE_IT_WAIT(dev, ctrl);
-
- return res;
-}
-
-/********** initialise a card **********/
-
-static int hrz_init(hrz_dev *dev)
-{
- int onefivefive;
-
- u16 chan;
-
- int buff_count;
-
- HDW * mem;
-
- cell_buf * tx_desc;
- cell_buf * rx_desc;
-
- u32 ctrl;
-
- ctrl = rd_regl (dev, CONTROL_0_REG);
- PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl);
- onefivefive = ctrl & ATM_LAYER_STATUS;
-
- if (onefivefive)
- printk (DEV_LABEL ": Horizon Ultra (at 155.52 MBps)");
- else
- printk (DEV_LABEL ": Horizon (at 25 MBps)");
-
- printk (":");
- // Reset the card to get everything in a known state
-
- printk (" reset");
- hrz_reset (dev);
-
- // Clear all the buffer memory
-
- printk (" clearing memory");
-
- for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem)
- wr_mem (dev, mem, 0);
-
- printk (" tx channels");
-
- // All eight transmit channels are set up as AAL5 ABR channels with
- // a 16us cell spacing. Why?
-
- // Channel 0 gets the free buffer at 100h, channel 1 gets the free
- // buffer at 110h etc.
-
- for (chan = 0; chan < TX_CHANS; ++chan) {
- tx_ch_desc * tx_desc = &memmap->tx_descs[chan];
- cell_buf * buf = &memmap->inittxbufs[chan];
-
- // initialise the read and write buffer pointers
- wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf));
- wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf));
-
- // set the status of the initial buffers to empty
- wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY);
- }
-
- // Use space bufn3 at the moment for tx buffers
-
- printk (" tx buffers");
-
- tx_desc = memmap->bufn3;
-
- wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY);
-
- for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) {
- wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY);
- tx_desc++;
- }
-
- wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY);
-
- // Initialise the transmit free buffer count
- wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE);
-
- printk (" rx channels");
-
- // Initialise all of the receive channels to be AAL5 disabled with
- // an interrupt threshold of 0
-
- for (chan = 0; chan < RX_CHANS; ++chan) {
- rx_ch_desc * rx_desc = &memmap->rx_descs[chan];
-
- wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED);
- }
-
- printk (" rx buffers");
-
- // Use space bufn4 at the moment for rx buffers
-
- rx_desc = memmap->bufn4;
-
- wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY);
-
- for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) {
- wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY);
-
- rx_desc++;
- }
-
- wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY);
-
- // Initialise the receive free buffer count
- wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE);
-
- // Initialise the Horizon registers
-
- // TX config
- wr_regw (dev, TX_CONFIG_OFF,
- ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE);
-
- // RX config. Use 10-x VC bits, x VP bits, non user cells in channel 0.
- wr_regw (dev, RX_CONFIG_OFF,
- DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits);
-
- // RX line config
- wr_regw (dev, RX_LINE_CONFIG_OFF,
- LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4);
-
- // Set the max AAL5 cell count to be just enough to contain the
- // largest AAL5 frame that the user wants to receive
- wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
- DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD));
-
- // Enable receive
- wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
-
- printk (" control");
-
- // Drive the OE of the LEDs then turn the green LED on
- ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED;
- wr_regl (dev, CONTROL_0_REG, ctrl);
-
- // Test for a 155-capable card
-
- if (onefivefive) {
- // Select 155 mode... make this a choice (or: how do we detect
- // external line speed and switch?)
- ctrl |= ATM_LAYER_SELECT;
- wr_regl (dev, CONTROL_0_REG, ctrl);
-
- // test SUNI-lite vs SAMBA
-
- // Register 0x00 in the SUNI will have some of bits 3-7 set, and
- // they will always be zero for the SAMBA. Ha! Bloody hardware
- // engineers. It'll never work.
-
- if (rd_framer (dev, 0) & 0x00f0) {
- // SUNI
- printk (" SUNI");
-
- // Reset, just in case
- wr_framer (dev, 0x00, 0x0080);
- wr_framer (dev, 0x00, 0x0000);
-
- // Configure transmit FIFO
- wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002);
-
- // Set line timed mode
- wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001);
- } else {
- // SAMBA
- printk (" SAMBA");
-
- // Reset, just in case
- wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001);
- wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001);
-
- // Turn off diagnostic loopback and enable line-timed mode
- wr_framer (dev, 0, 0x0002);
-
- // Turn on transmit outputs
- wr_framer (dev, 2, 0x0B80);
- }
- } else {
- // Select 25 mode
- ctrl &= ~ATM_LAYER_SELECT;
-
- // Madge B154 setup
- // none required?
- }
-
- printk (" LEDs");
-
- GREEN_LED_ON(dev);
- YELLOW_LED_ON(dev);
-
- printk (" ESI=");
-
- {
- u16 b = 0;
- int i;
- u8 * esi = dev->atm_dev->esi;
-
- // in the card I have, EEPROM
- // addresses 0, 1, 2 contain 0
- // addresses 5, 6 etc. contain ffff
- // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order)
- // the read_bia routine gets the BIA in Ethernet bit order
-
- for (i=0; i < ESI_LEN; ++i) {
- if (i % 2 == 0)
- b = read_bia (dev, i/2 + 2);
- else
- b = b >> 8;
- esi[i] = b & 0xFF;
- printk ("%02x", esi[i]);
- }
- }
-
- // Enable RX_Q and ?X_COMPLETE interrupts only
- wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS);
- printk (" IRQ on");
-
- printk (".\n");
-
- return onefivefive;
-}
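The ESI loop at the end of hrz_init() above consumes one 16-bit EEPROM word (starting at word address 2) per pair of ESI bytes, low byte first. The small host-side illustration below uses made-up word values (not real EEPROM contents) to show the resulting byte order, matching the 00 00 f6 Madge prefix mentioned in the comment:

#include <stdio.h>

int main(void)
{
    /* as if returned by read_bia(dev, 2), read_bia(dev, 3), read_bia(dev, 4) */
    unsigned short words[3] = { 0x0000, 0x12f6, 0x5634 };
    unsigned char esi[6];
    unsigned short b = 0;
    int i;

    for (i = 0; i < 6; i++) {
        if (i % 2 == 0)
            b = words[i / 2];   /* new word: low byte first */
        else
            b = b >> 8;         /* then the high byte */
        esi[i] = b & 0xFF;
    }

    for (i = 0; i < 6; i++)
        printf("%02x", esi[i]);
    printf("\n"); /* prints 0000f6123456 */
    return 0;
}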
-
-/********** check max_sdu **********/
-
-static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) {
- PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu");
-
- switch (aal) {
- case aal0:
- if (!(tp->max_sdu)) {
- PRINTD (DBG_QOS, "defaulting max_sdu");
- tp->max_sdu = ATM_AAL0_SDU;
- } else if (tp->max_sdu != ATM_AAL0_SDU) {
- PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu");
- return -EINVAL;
- }
- break;
- case aal34:
- if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) {
- PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
- tp->max_sdu = ATM_MAX_AAL34_PDU;
- }
- break;
- case aal5:
- if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) {
- PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
- tp->max_sdu = max_frame_size;
- }
- break;
- }
- return 0;
-}
-
-/********** check pcr **********/
-
-// something like this should be part of ATM Linux
-static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) {
- // we are assuming non-UBR, and non-special values of pcr
- if (tp->min_pcr == ATM_MAX_PCR)
- PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR");
- else if (tp->min_pcr < 0)
- PRINTD (DBG_QOS, "luser gave negative min_pcr");
- else if (tp->min_pcr && tp->min_pcr > pcr)
- PRINTD (DBG_QOS, "pcr less than min_pcr");
- else
- // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1)
- // easier to #define ATM_MAX_PCR 0 and have all rates unsigned?
- // [this would get rid of next two conditionals]
- if ((0) && tp->max_pcr == ATM_MAX_PCR)
- PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR");
- else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0)
- PRINTD (DBG_QOS, "luser gave negative max_pcr");
- else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr)
- PRINTD (DBG_QOS, "pcr greater than max_pcr");
- else {
- // each limit unspecified or not violated
- PRINTD (DBG_QOS, "xBR(pcr) OK");
- return 0;
- }
- PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d",
- pcr, tp->min_pcr, tp->pcr, tp->max_pcr);
- return -EINVAL;
-}
-
-/********** open VC **********/
-
-static int hrz_open (struct atm_vcc *atm_vcc)
-{
- int error;
- u16 channel;
-
- struct atm_qos * qos;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
-
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc vcc;
- hrz_vcc * vccp; // allocated late
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
- PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci);
-
-#ifdef ATM_VPI_UNSPEC
- // UNSPEC is deprecated, remove this code eventually
- if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
- PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
- return -EINVAL;
- }
-#endif
-
- error = vpivci_to_channel (&channel, vpi, vci);
- if (error) {
- PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
- return error;
- }
-
- vcc.channel = channel;
- // max speed for the moment
- vcc.tx_rate = 0x0;
-
- qos = &atm_vcc->qos;
-
- // check AAL and remember it
- switch (qos->aal) {
- case ATM_AAL0:
- // we would if it were 48 bytes and not 52!
- PRINTD (DBG_QOS|DBG_VCC, "AAL0");
- vcc.aal = aal0;
- break;
- case ATM_AAL34:
- // we would if I knew how to do the SAR!
- PRINTD (DBG_QOS|DBG_VCC, "AAL3/4");
- vcc.aal = aal34;
- break;
- case ATM_AAL5:
- PRINTD (DBG_QOS|DBG_VCC, "AAL5");
- vcc.aal = aal5;
- break;
- default:
- PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
- return -EINVAL;
- }
-
- // TX traffic parameters
-
- // there are two, interrelated problems here: 1. the reservation of
- // PCR is not a binary choice, we are given bounds and/or a
- // desirable value; 2. the device is only capable of certain values,
- // most of which are not integers. It is almost certainly acceptable
- // to be off by a maximum of 1 to 10 cps.
-
- // Pragmatic choice: always store an integral PCR as that which has
- // been allocated, even if we allocate a little (or a lot) less,
- // after rounding. The actual allocation depends on what we can
- // manage with our rate selection algorithm. The rate selection
- // algorithm is given an integral PCR and a tolerance and told
- // whether it should round the value up or down if the tolerance is
- // exceeded; it returns: a) the actual rate selected (rounded up to
- // the nearest integer), b) a bit pattern to feed to the timer
- // register, and c) a failure value if no applicable rate exists.
-
- // Part of the job is done by atm_pcr_goal which gives us a PCR
- // specification which says: EITHER grab the maximum available PCR
- // (and perhaps a lower bound which we must not pass), OR grab this
- // amount, rounding down if you have to (and perhaps a lower bound
- // which we must not pass) OR grab this amount, rounding up if you
- // have to (and perhaps an upper bound which we must not pass). If any
- // bounds ARE passed we fail. Note that rounding is only rounding to
- // match device limitations, we do not round down to satisfy
- // bandwidth availability even if this would not violate any given
- // lower bound.
-
- // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s
- // (say) so this is not even a binary fixpoint cell rate (but this
- // device can do it). To avoid this sort of hassle we use a
- // tolerance parameter (currently fixed at 10 cps).
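The CBR case below turns the value returned by atm_pcr_goal() into a target rate and a rounding direction. The sign convention can be summarised in a small sketch (a hypothetical helper for illustration only, not part of the driver; "rounding" mirrors the enum in horizon.h):

#include <stdio.h>

typedef enum { round_up, round_down, round_nearest } rounding; /* as in horizon.h */

struct pcr_goal {
    int pcr;    /* cells per second to aim for */
    rounding r; /* how make_rate_with_tolerance() may round */
};

static struct pcr_goal interpret_pcr_goal(int goal, unsigned int tx_avail)
{
    struct pcr_goal g;

    if (goal == 0) {
        /* grab the maximum available PCR, rounding down */
        g.pcr = tx_avail;
        g.r = round_down;
    } else if (goal < 0) {
        /* grab |goal|, rounding down if the device cannot hit it exactly */
        g.pcr = -goal;
        g.r = round_down;
    } else {
        /* grab goal, rounding up if the device cannot hit it exactly */
        g.pcr = goal;
        g.r = round_up;
    }
    return g;
}

int main(void)
{
    struct pcr_goal g = interpret_pcr_goal(0, 353207); /* take all that is left */
    printf("pcr %d, %s\n", g.pcr, g.r == round_down ? "round down" : "round up");
    g = interpret_pcr_goal(100000, 353207);
    printf("pcr %d, %s\n", g.pcr, g.r == round_down ? "round down" : "round up");
    return 0;
}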
-
- PRINTD (DBG_QOS, "TX:");
-
- txtp = &qos->txtp;
-
- // set up defaults for no traffic
- vcc.tx_rate = 0;
- // who knows what would actually happen if you try and send on this?
- vcc.tx_xbr_bits = IDLE_RATE_TYPE;
- vcc.tx_pcr_bits = CLOCK_DISABLE;
-#if 0
- vcc.tx_scr_bits = CLOCK_DISABLE;
- vcc.tx_bucket_bits = 0;
-#endif
-
- if (txtp->traffic_class != ATM_NONE) {
- error = check_max_sdu (vcc.aal, txtp, max_tx_size);
- if (error) {
- PRINTD (DBG_QOS, "TX max_sdu check failed");
- return error;
- }
-
- switch (txtp->traffic_class) {
- case ATM_UBR: {
- // we take "the PCR" as a rate-cap
- // not reserved
- vcc.tx_rate = 0;
- make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL);
- vcc.tx_xbr_bits = ABR_RATE_TYPE;
- break;
- }
-#if 0
- case ATM_ABR: {
- // reserve min, allow up to max
- vcc.tx_rate = 0; // ?
- make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, 0);
- vcc.tx_xbr_bits = ABR_RATE_TYPE;
- break;
- }
-#endif
- case ATM_CBR: {
- int pcr = atm_pcr_goal (txtp);
- rounding r;
- if (!pcr) {
- // down vs. up, remaining bandwidth vs. unlimited bandwidth!!
- // we should really arrange that once someone gets unlimited
- // bandwidth, no more non-UBR channels can be opened until the
- // unlimited one closes. For the moment, round_down means
- // greedy people actually get something and not nothing
- r = round_down;
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
- pcr = dev->tx_avail;
- } else if (pcr < 0) {
- r = round_down;
- pcr = -pcr;
- } else {
- r = round_up;
- }
- error = make_rate_with_tolerance (dev, pcr, r, 10,
- &vcc.tx_pcr_bits, &vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "could not make rate from TX PCR");
- return error;
- }
- // not really clear what further checking is needed
- error = atm_pcr_check (txtp, vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "TX PCR failed consistency check");
- return error;
- }
- vcc.tx_xbr_bits = CBR_RATE_TYPE;
- break;
- }
-#if 0
- case ATM_VBR: {
- int pcr = atm_pcr_goal (txtp);
- // int scr = atm_scr_goal (txtp);
- int scr = pcr/2; // just for fun
- unsigned int mbs = 60; // just for fun
- rounding pr;
- rounding sr;
- unsigned int bucket;
- if (!pcr) {
- pr = round_nearest;
- pcr = 1<<30;
- } else if (pcr < 0) {
- pr = round_down;
- pcr = -pcr;
- } else {
- pr = round_up;
- }
- error = make_rate_with_tolerance (dev, pcr, pr, 10,
- &vcc.tx_pcr_bits, 0);
- if (!scr) {
- // see comments for PCR with CBR above
- sr = round_down;
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
- scr = dev->tx_avail;
- } else if (scr < 0) {
- sr = round_down;
- scr = -scr;
- } else {
- sr = round_up;
- }
- error = make_rate_with_tolerance (dev, scr, sr, 10,
- &vcc.tx_scr_bits, &vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "could not make rate from TX SCR");
- return error;
- }
- // not really clear what further checking is needed
- // error = atm_scr_check (txtp, vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "TX SCR failed consistency check");
- return error;
- }
- // bucket calculations (from a piece of paper...) cell bucket
- // capacity must be largest integer smaller than m(p-s)/p + 1
- // where m = max burst size, p = pcr, s = scr
- bucket = mbs*(pcr-scr)/pcr;
- if (bucket*pcr != mbs*(pcr-scr))
- bucket += 1;
- if (bucket > BUCKET_MAX_SIZE) {
- PRINTD (DBG_QOS, "shrinking bucket from %u to %u",
- bucket, BUCKET_MAX_SIZE);
- bucket = BUCKET_MAX_SIZE;
- }
- vcc.tx_xbr_bits = VBR_RATE_TYPE;
- vcc.tx_bucket_bits = bucket;
- break;
- }
-#endif
- default: {
- PRINTD (DBG_QOS, "unsupported TX traffic class");
- return -EINVAL;
- }
- }
- }
-
- // RX traffic parameters
-
- PRINTD (DBG_QOS, "RX:");
-
- rxtp = &qos->rxtp;
-
- // set up defaults for no traffic
- vcc.rx_rate = 0;
-
- if (rxtp->traffic_class != ATM_NONE) {
- error = check_max_sdu (vcc.aal, rxtp, max_rx_size);
- if (error) {
- PRINTD (DBG_QOS, "RX max_sdu check failed");
- return error;
- }
- switch (rxtp->traffic_class) {
- case ATM_UBR: {
- // not reserved
- break;
- }
-#if 0
- case ATM_ABR: {
- // reserve min
- vcc.rx_rate = 0; // ?
- break;
- }
-#endif
- case ATM_CBR: {
- int pcr = atm_pcr_goal (rxtp);
- if (!pcr) {
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
- pcr = dev->rx_avail;
- } else if (pcr < 0) {
- pcr = -pcr;
- }
- vcc.rx_rate = pcr;
- // not really clear what further checking is needed
- error = atm_pcr_check (rxtp, vcc.rx_rate);
- if (error) {
- PRINTD (DBG_QOS, "RX PCR failed consistency check");
- return error;
- }
- break;
- }
-#if 0
- case ATM_VBR: {
- // int scr = atm_scr_goal (rxtp);
- int scr = 1<<16; // just for fun
- if (!scr) {
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
- scr = dev->rx_avail;
- } else if (scr < 0) {
- scr = -scr;
- }
- vcc.rx_rate = scr;
- // not really clear what further checking is needed
- // error = atm_scr_check (rxtp, vcc.rx_rate);
- if (error) {
- PRINTD (DBG_QOS, "RX SCR failed consistency check");
- return error;
- }
- break;
- }
-#endif
- default: {
- PRINTD (DBG_QOS, "unsupported RX traffic class");
- return -EINVAL;
- }
- }
- }
-
-
- // late abort useful for diagnostics
- if (vcc.aal != aal5) {
- PRINTD (DBG_QOS, "AAL not supported");
- return -EINVAL;
- }
-
- // get space for our vcc stuff and copy parameters into it
- vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL);
- if (!vccp) {
- PRINTK (KERN_ERR, "out of memory!");
- return -ENOMEM;
- }
- *vccp = vcc;
-
- // clear error and grab cell rate resource lock
- error = 0;
- spin_lock (&dev->rate_lock);
-
- if (vcc.tx_rate > dev->tx_avail) {
- PRINTD (DBG_QOS, "not enough TX PCR left");
- error = -EAGAIN;
- }
-
- if (vcc.rx_rate > dev->rx_avail) {
- PRINTD (DBG_QOS, "not enough RX PCR left");
- error = -EAGAIN;
- }
-
- if (!error) {
- // really consume cell rates
- dev->tx_avail -= vcc.tx_rate;
- dev->rx_avail -= vcc.rx_rate;
- PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR",
- vcc.tx_rate, vcc.rx_rate);
- }
-
- // release lock and exit on error
- spin_unlock (&dev->rate_lock);
- if (error) {
- PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources");
- kfree (vccp);
- return error;
- }
-
- // this is "immediately before allocating the connection identifier
- // in hardware" - so long as the next call does not fail :)
- set_bit(ATM_VF_ADDR,&atm_vcc->flags);
-
- // any errors here are very serious and should never occur
-
- if (rxtp->traffic_class != ATM_NONE) {
- if (dev->rxer[channel]) {
- PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX");
- error = -EBUSY;
- }
- if (!error)
- error = hrz_open_rx (dev, channel);
- if (error) {
- kfree (vccp);
- return error;
- }
- // this link allows RX frames through
- dev->rxer[channel] = atm_vcc;
- }
-
- // success, set elements of atm_vcc
- atm_vcc->dev_data = (void *) vccp;
-
- // indicate readiness
- set_bit(ATM_VF_READY,&atm_vcc->flags);
-
- return 0;
-}
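For reference, the bucket-capacity formula quoted in the compiled-out VBR branch of hrz_open() (the largest integer smaller than m(p-s)/p + 1, where m is the max burst size, p the PCR and s the SCR) is just a round-up division. A standalone sketch with example numbers:

#include <stdio.h>

/* capacity = ceil(mbs * (pcr - scr) / pcr), as computed in the VBR branch */
static unsigned int bucket_capacity(unsigned int mbs, unsigned int pcr,
                                    unsigned int scr)
{
    unsigned int bucket = mbs * (pcr - scr) / pcr;

    if (bucket * pcr != mbs * (pcr - scr)) /* round up on any remainder */
        bucket += 1;
    return bucket;
}

int main(void)
{
    printf("%u\n", bucket_capacity(60, 1000, 500)); /* 30, exact */
    printf("%u\n", bucket_capacity(60, 1000, 333)); /* 41, rounded up from 40.02 */
    return 0;
}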
-
-/********** close VC **********/
-
-static void hrz_close (struct atm_vcc * atm_vcc) {
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc * vcc = HRZ_VCC(atm_vcc);
- u16 channel = vcc->channel;
- PRINTD (DBG_VCC|DBG_FLOW, "hrz_close");
-
- // indicate unreadiness
- clear_bit(ATM_VF_READY,&atm_vcc->flags);
-
- if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
- unsigned int i;
-
- // let any TX on this channel that has started complete
- // no restart, just keep trying
- while (tx_hold (dev))
- ;
- // remove record of any tx_channel having been setup for this channel
- for (i = 0; i < TX_CHANS; ++i)
- if (dev->tx_channel_record[i] == channel) {
- dev->tx_channel_record[i] = -1;
- break;
- }
- if (dev->last_vc == channel)
- dev->tx_last = -1;
- tx_release (dev);
- }
-
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
- // disable RXing - it tries quite hard
- hrz_close_rx (dev, channel);
- // forget the vcc - no more skbs will be pushed
- if (atm_vcc != dev->rxer[channel])
- PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p",
- "arghhh! we're going to die!",
- atm_vcc, dev->rxer[channel]);
- dev->rxer[channel] = NULL;
- }
-
- // atomically release our rate reservation
- spin_lock (&dev->rate_lock);
- PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR",
- vcc->tx_rate, vcc->rx_rate);
- dev->tx_avail += vcc->tx_rate;
- dev->rx_avail += vcc->rx_rate;
- spin_unlock (&dev->rate_lock);
-
- // free our structure
- kfree (vcc);
- // say the VPI/VCI is free again
- clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
-}
-
-#if 0
-static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_ioctl");
- return -1;
-}
-
-unsigned char hrz_phy_get (struct atm_dev * atm_dev, unsigned long addr) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_phy_get");
- return 0;
-}
-
-static void hrz_phy_put (struct atm_dev * atm_dev, unsigned char value,
- unsigned long addr) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_phy_put");
-}
-
-static int hrz_change_qos (struct atm_vcc * atm_vcc, struct atm_qos *qos, int flgs) {
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- PRINTD (DBG_FLOW, "hrz_change_qos");
- return -1;
-}
-#endif
-
-/********** proc file contents **********/
-
-static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- int left = *pos;
- PRINTD (DBG_FLOW, "hrz_proc_read");
-
- /* more diagnostics here? */
-
-#if 0
- if (!left--) {
- unsigned int count = sprintf (page, "vbr buckets:");
- unsigned int i;
- for (i = 0; i < TX_CHANS; ++i)
- count += sprintf (page+count, " %u/%u",
- query_tx_channel_config (dev, i, BUCKET_FULLNESS_ACCESS),
- query_tx_channel_config (dev, i, BUCKET_CAPACITY_ACCESS));
- count += sprintf (page+count, ".\n");
- return count;
- }
-#endif
-
- if (!left--)
- return sprintf (page,
- "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n",
- dev->tx_cell_count, dev->rx_cell_count,
- dev->hec_error_count, dev->unassigned_cell_count);
-
- if (!left--)
- return sprintf (page,
- "free cell buffers: TX %hu, RX %hu+%hu.\n",
- rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF),
- rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF),
- dev->noof_spare_buffers);
-
- if (!left--)
- return sprintf (page,
- "cps remaining: TX %u, RX %u\n",
- dev->tx_avail, dev->rx_avail);
-
- return 0;
-}
-
-static const struct atmdev_ops hrz_ops = {
- .open = hrz_open,
- .close = hrz_close,
- .send = hrz_send,
- .proc_read = hrz_proc_read,
- .owner = THIS_MODULE,
-};
-
-static int hrz_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_ent)
-{
- hrz_dev * dev;
- int err = 0;
-
- // adapter slot free, read resources from PCI configuration space
- u32 iobase = pci_resource_start (pci_dev, 0);
- u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
- unsigned int irq;
- unsigned char lat;
-
- PRINTD (DBG_FLOW, "hrz_probe");
-
- if (pci_enable_device(pci_dev))
- return -EINVAL;
-
- /* XXX DEV_LABEL is a guess */
- if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
- err = -EINVAL;
- goto out_disable;
- }
-
- dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
- if (!dev) {
- // perhaps we should be nice: deregister all adapters and abort?
- PRINTD(DBG_ERR, "out of memory");
- err = -ENOMEM;
- goto out_release;
- }
-
- pci_set_drvdata(pci_dev, dev);
-
- // grab IRQ and install handler - move this someplace more sensible
- irq = pci_dev->irq;
- if (request_irq(irq,
- interrupt_handler,
- IRQF_SHARED, /* irqflags guess */
- DEV_LABEL, /* name guess */
- dev)) {
- PRINTD(DBG_WARN, "request IRQ failed!");
- err = -EINVAL;
- goto out_free;
- }
-
- PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
- iobase, irq, membase);
-
- dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1,
- NULL);
- if (!(dev->atm_dev)) {
- PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
- err = -EINVAL;
- goto out_free_irq;
- }
-
- PRINTD(DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
- dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
- dev->pci_dev = pci_dev;
-
- // enable bus master accesses
- pci_set_master(pci_dev);
-
- // frobnicate latency (upwards, usually)
- pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat);
- if (pci_lat) {
- PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu",
- "changing", lat, pci_lat);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
- } else if (lat < MIN_PCI_LATENCY) {
- PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu",
- "increasing", lat, MIN_PCI_LATENCY);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
- }
-
- dev->iobase = iobase;
- dev->irq = irq;
- dev->membase = membase;
-
- dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0];
- dev->rx_q_wrap = &memmap->rx_q_entries[RX_CHANS-1];
-
- // these next three are performance hacks
- dev->last_vc = -1;
- dev->tx_last = -1;
- dev->tx_idle = 0;
-
- dev->tx_regions = 0;
- dev->tx_bytes = 0;
- dev->tx_skb = NULL;
- dev->tx_iovec = NULL;
-
- dev->tx_cell_count = 0;
- dev->rx_cell_count = 0;
- dev->hec_error_count = 0;
- dev->unassigned_cell_count = 0;
-
- dev->noof_spare_buffers = 0;
-
- {
- unsigned int i;
- for (i = 0; i < TX_CHANS; ++i)
- dev->tx_channel_record[i] = -1;
- }
-
- dev->flags = 0;
-
- // Allocate cell rates and remember ASIC version
- // Fibre: ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
- // Copper: (WRONG) we want 6 into the above, close to 25Mb/s
- // Copper: (plagiarise!) 25600000/8/270*260/53 - n/53
-
- if (hrz_init(dev)) {
- // to be really pedantic, this should be ATM_OC3c_PCR
- dev->tx_avail = ATM_OC3_PCR;
- dev->rx_avail = ATM_OC3_PCR;
- set_bit(ultra, &dev->flags); // NOT "|= ultra" !
- } else {
- dev->tx_avail = ((25600000/8)*26)/(27*53);
- dev->rx_avail = ((25600000/8)*26)/(27*53);
- PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering.");
- }
-
- // rate changes spinlock
- spin_lock_init(&dev->rate_lock);
-
- // on-board memory access spinlock; we want atomic reads and
- // writes to adapter memory (handles IRQ and SMP)
- spin_lock_init(&dev->mem_lock);
-
- init_waitqueue_head(&dev->tx_queue);
-
- // vpi_bits in 0..4, leaving 6..10 bits for the VCI
- dev->atm_dev->ci_range.vpi_bits = vpi_bits;
- dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
-
- timer_setup(&dev->housekeeping, do_housekeeping, 0);
- mod_timer(&dev->housekeeping, jiffies);
-
-out:
- return err;
-
-out_free_irq:
- free_irq(irq, dev);
-out_free:
- kfree(dev);
-out_release:
- release_region(iobase, HRZ_IO_EXTENT);
-out_disable:
- pci_disable_device(pci_dev);
- goto out;
-}
-
-static void hrz_remove_one(struct pci_dev *pci_dev)
-{
- hrz_dev *dev;
-
- dev = pci_get_drvdata(pci_dev);
-
- PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
- del_timer_sync(&dev->housekeeping);
- hrz_reset(dev);
- atm_dev_deregister(dev->atm_dev);
- free_irq(dev->irq, dev);
- release_region(dev->iobase, HRZ_IO_EXTENT);
- kfree(dev);
-
- pci_disable_device(pci_dev);
-}
-
-static void __init hrz_check_args (void) {
-#ifdef DEBUG_HORIZON
- PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
-#else
- if (debug)
- PRINTK (KERN_NOTICE, "no debug support in this image");
-#endif
-
- if (vpi_bits > HRZ_MAX_VPI)
- PRINTK (KERN_ERR, "vpi_bits has been limited to %hu",
- vpi_bits = HRZ_MAX_VPI);
-
- if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT)
- PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu",
- max_tx_size = TX_AAL5_LIMIT);
-
- if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT)
- PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu",
- max_rx_size = RX_AAL5_LIMIT);
-
- return;
-}
-
-MODULE_AUTHOR(maintainer_string);
-MODULE_DESCRIPTION(description_string);
-MODULE_LICENSE("GPL");
-module_param(debug, ushort, 0644);
-module_param(vpi_bits, ushort, 0);
-module_param(max_tx_size, int, 0);
-module_param(max_rx_size, int, 0);
-module_param(pci_lat, byte, 0);
-MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
-MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
-MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
-MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
-MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-
-static const struct pci_device_id hrz_pci_tbl[] = {
- { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
-
-static struct pci_driver hrz_driver = {
- .name = "horizon",
- .probe = hrz_probe,
- .remove = hrz_remove_one,
- .id_table = hrz_pci_tbl,
-};
-
-/********** module entry **********/
-
-static int __init hrz_module_init (void) {
- BUILD_BUG_ON(sizeof(struct MEMMAP) != 128*1024/4);
-
- show_version();
-
- // check arguments
- hrz_check_args();
-
- // get the juice
- return pci_register_driver(&hrz_driver);
-}
-
-/********** module exit **********/
-
-static void __exit hrz_module_exit (void) {
- PRINTD (DBG_FLOW, "cleanup_module");
-
- pci_unregister_driver(&hrz_driver);
-}
-
-module_init(hrz_module_init);
-module_exit(hrz_module_exit);
diff --git a/drivers/atm/horizon.h b/drivers/atm/horizon.h
deleted file mode 100644
index 7523eba19bad..000000000000
--- a/drivers/atm/horizon.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- Madge Horizon ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/*
- IMPORTANT NOTE: Madge Networks no longer makes the adapters
- supported by this driver and makes no commitment to maintain it.
-*/
-
-/* too many macros - change to inline functions */
-
-#ifndef DRIVER_ATM_HORIZON_H
-#define DRIVER_ATM_HORIZON_H
-
-
-#ifdef CONFIG_ATM_HORIZON_DEBUG
-#define DEBUG_HORIZON
-#endif
-
-#define DEV_LABEL "hrz"
-
-#ifndef PCI_VENDOR_ID_MADGE
-#define PCI_VENDOR_ID_MADGE 0x10B6
-#endif
-#ifndef PCI_DEVICE_ID_MADGE_HORIZON
-#define PCI_DEVICE_ID_MADGE_HORIZON 0x1000
-#endif
-
-// diagnostic output
-
-#define PRINTK(severity,format,args...) \
- printk(severity DEV_LABEL ": " format "\n" , ## args)
-
-#ifdef DEBUG_HORIZON
-
-#define DBG_ERR 0x0001
-#define DBG_WARN 0x0002
-#define DBG_INFO 0x0004
-#define DBG_VCC 0x0008
-#define DBG_QOS 0x0010
-#define DBG_TX 0x0020
-#define DBG_RX 0x0040
-#define DBG_SKB 0x0080
-#define DBG_IRQ 0x0100
-#define DBG_FLOW 0x0200
-#define DBG_BUS 0x0400
-#define DBG_REGS 0x0800
-#define DBG_DATA 0x1000
-#define DBG_MASK 0x1fff
-
-/* the ## prevents the annoying double expansion of the macro arguments */
-/* KERN_INFO is used since KERN_DEBUG often does not make it to the console */
-#define PRINTDB(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format , ## args) : 1 )
-#define PRINTDM(bits,format,args...) \
- ( (debug & (bits)) ? printk (format , ## args) : 1 )
-#define PRINTDE(bits,format,args...) \
- ( (debug & (bits)) ? printk (format "\n" , ## args) : 1 )
-#define PRINTD(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format "\n" , ## args) : 1 )
-
-#else
-
-#define PRINTD(bits,format,args...)
-#define PRINTDB(bits,format,args...)
-#define PRINTDM(bits,format,args...)
-#define PRINTDE(bits,format,args...)
-
-#endif
-
-#define PRINTDD(sec,fmt,args...)
-#define PRINTDDB(sec,fmt,args...)
-#define PRINTDDM(sec,fmt,args...)
-#define PRINTDDE(sec,fmt,args...)
-
-// fixed constants
-
-#define SPARE_BUFFER_POOL_SIZE MAX_VCS
-#define HRZ_MAX_VPI 4
-#define MIN_PCI_LATENCY 48 // 24 IS TOO SMALL
-
-/* Horizon specific bits */
-/* Register offsets */
-
-#define HRZ_IO_EXTENT 0x80
-
-#define DATA_PORT_OFF 0x00
-#define TX_CHANNEL_PORT_OFF 0x04
-#define TX_DESCRIPTOR_PORT_OFF 0x08
-#define MEMORY_PORT_OFF 0x0C
-#define MEM_WR_ADDR_REG_OFF 0x14
-#define MEM_RD_ADDR_REG_OFF 0x18
-#define CONTROL_0_REG 0x1C
-#define INT_SOURCE_REG_OFF 0x20
-#define INT_ENABLE_REG_OFF 0x24
-#define MASTER_RX_ADDR_REG_OFF 0x28
-#define MASTER_RX_COUNT_REG_OFF 0x2C
-#define MASTER_TX_ADDR_REG_OFF 0x30
-#define MASTER_TX_COUNT_REG_OFF 0x34
-#define TX_DESCRIPTOR_REG_OFF 0x38
-#define TX_CHANNEL_CONFIG_COMMAND_OFF 0x40
-#define TX_CHANNEL_CONFIG_DATA_OFF 0x44
-#define TX_FREE_BUFFER_COUNT_OFF 0x48
-#define RX_FREE_BUFFER_COUNT_OFF 0x4C
-#define TX_CONFIG_OFF 0x50
-#define TX_STATUS_OFF 0x54
-#define RX_CONFIG_OFF 0x58
-#define RX_LINE_CONFIG_OFF 0x5C
-#define RX_QUEUE_RD_PTR_OFF 0x60
-#define RX_QUEUE_WR_PTR_OFF 0x64
-#define MAX_AAL5_CELL_COUNT_OFF 0x68
-#define RX_CHANNEL_PORT_OFF 0x6C
-#define TX_CELL_COUNT_OFF 0x70
-#define RX_CELL_COUNT_OFF 0x74
-#define HEC_ERROR_COUNT_OFF 0x78
-#define UNASSIGNED_CELL_COUNT_OFF 0x7C
-
-/* Register bit definitions */
-
-/* Control 0 register */
-
-#define SEEPROM_DO 0x00000001
-#define SEEPROM_DI 0x00000002
-#define SEEPROM_SK 0x00000004
-#define SEEPROM_CS 0x00000008
-#define DEBUG_BIT_0 0x00000010
-#define DEBUG_BIT_1 0x00000020
-#define DEBUG_BIT_2 0x00000040
-// RESERVED 0x00000080
-#define DEBUG_BIT_0_OE 0x00000100
-#define DEBUG_BIT_1_OE 0x00000200
-#define DEBUG_BIT_2_OE 0x00000400
-// RESERVED 0x00000800
-#define DEBUG_BIT_0_STATE 0x00001000
-#define DEBUG_BIT_1_STATE 0x00002000
-#define DEBUG_BIT_2_STATE 0x00004000
-// RESERVED 0x00008000
-#define GENERAL_BIT_0 0x00010000
-#define GENERAL_BIT_1 0x00020000
-#define GENERAL_BIT_2 0x00040000
-#define GENERAL_BIT_3 0x00080000
-#define RESET_HORIZON 0x00100000
-#define RESET_ATM 0x00200000
-#define RESET_RX 0x00400000
-#define RESET_TX 0x00800000
-#define RESET_HOST 0x01000000
-// RESERVED 0x02000000
-#define TARGET_RETRY_DISABLE 0x04000000
-#define ATM_LAYER_SELECT 0x08000000
-#define ATM_LAYER_STATUS 0x10000000
-// RESERVED 0xE0000000
-
-/* Interrupt source and enable registers */
-
-#define RX_DATA_AV 0x00000001
-#define RX_DISABLED 0x00000002
-#define TIMING_MARKER 0x00000004
-#define FORCED 0x00000008
-#define RX_BUS_MASTER_COMPLETE 0x00000010
-#define TX_BUS_MASTER_COMPLETE 0x00000020
-#define ABR_TX_CELL_COUNT_INT 0x00000040
-#define DEBUG_INT 0x00000080
-// RESERVED 0xFFFFFF00
-
-/* PIO and Bus Mastering */
-
-#define MAX_PIO_COUNT 0x000000ff // 255 - make tunable?
-// 8188 is a hard limit for bus mastering
-#define MAX_TRANSFER_COUNT 0x00001ffc // 8188
-#define MASTER_TX_AUTO_APPEND_DESC 0x80000000
-
-/* TX channel config command port */
-
-#define PCR_TIMER_ACCESS 0x0000
-#define SCR_TIMER_ACCESS 0x0001
-#define BUCKET_CAPACITY_ACCESS 0x0002
-#define BUCKET_FULLNESS_ACCESS 0x0003
-#define RATE_TYPE_ACCESS 0x0004
-// UNUSED 0x00F8
-#define TX_CHANNEL_CONFIG_MULT 0x0100
-// UNUSED 0xF800
-#define BUCKET_MAX_SIZE 0x003f
-
-/* TX channel config data port */
-
-#define CLOCK_SELECT_SHIFT 4
-#define CLOCK_DISABLE 0x00ff
-
-#define IDLE_RATE_TYPE 0x0
-#define ABR_RATE_TYPE 0x1
-#define VBR_RATE_TYPE 0x2
-#define CBR_RATE_TYPE 0x3
-
-/* TX config register */
-
-#define DRVR_DRVRBAR_ENABLE 0x0001
-#define TXCLK_MUX_SELECT_RCLK 0x0002
-#define TRANSMIT_TIMING_MARKER 0x0004
-#define LOOPBACK_TIMING_MARKER 0x0008
-#define TX_TEST_MODE_16MHz 0x0000
-#define TX_TEST_MODE_8MHz 0x0010
-#define TX_TEST_MODE_5_33MHz 0x0020
-#define TX_TEST_MODE_4MHz 0x0030
-#define TX_TEST_MODE_3_2MHz 0x0040
-#define TX_TEST_MODE_2_66MHz 0x0050
-#define TX_TEST_MODE_2_29MHz 0x0060
-#define TX_NORMAL_OPERATION 0x0070
-#define ABR_ROUND_ROBIN 0x0080
-
-/* TX status register */
-
-#define IDLE_CHANNELS_MASK 0x00FF
-#define ABR_CELL_COUNT_REACHED_MULT 0x0100
-#define ABR_CELL_COUNT_REACHED_MASK 0xFF
-
-/* RX config register */
-
-#define NON_USER_CELLS_IN_ONE_CHANNEL 0x0008
-#define RX_ENABLE 0x0010
-#define IGNORE_UNUSED_VPI_VCI_BITS_SET 0x0000
-#define NON_USER_UNUSED_VPI_VCI_BITS_SET 0x0020
-#define DISCARD_UNUSED_VPI_VCI_BITS_SET 0x0040
-
-/* RX line config register */
-
-#define SIGNAL_LOSS 0x0001
-#define FREQUENCY_DETECT_ERROR 0x0002
-#define LOCK_DETECT_ERROR 0x0004
-#define SELECT_INTERNAL_LOOPBACK 0x0008
-#define LOCK_DETECT_ENABLE 0x0010
-#define FREQUENCY_DETECT_ENABLE 0x0020
-#define USER_FRAQ 0x0040
-#define GXTALOUT_SELECT_DIV4 0x0080
-#define GXTALOUT_SELECT_NO_GATING 0x0100
-#define TIMING_MARKER_RECEIVED 0x0200
-
-/* RX channel port */
-
-#define RX_CHANNEL_MASK 0x03FF
-// UNUSED 0x3C00
-#define FLUSH_CHANNEL 0x4000
-#define RX_CHANNEL_UPDATE_IN_PROGRESS 0x8000
-
-/* Receive queue entry */
-
-#define RX_Q_ENTRY_LENGTH_MASK 0x0000FFFF
-#define RX_Q_ENTRY_CHANNEL_SHIFT 16
-#define SIMONS_DODGEY_MARKER 0x08000000
-#define RX_CONGESTION_EXPERIENCED 0x10000000
-#define RX_CRC_10_OK 0x20000000
-#define RX_CRC_32_OK 0x40000000
-#define RX_COMPLETE_FRAME 0x80000000
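A receive queue entry therefore packs the AAL5 frame length into the low 16 bits, the channel above it, and status flags in the top bits. An illustrative host-side decode using the masks above (the 10-bit channel width is inferred from RX_CHANNEL_MASK rather than stated explicitly, and the example word is made up):

#include <stdio.h>

#define RX_Q_ENTRY_LENGTH_MASK   0x0000FFFF
#define RX_Q_ENTRY_CHANNEL_SHIFT 16
#define RX_CHANNEL_MASK          0x03FF
#define RX_CRC_32_OK             0x40000000
#define RX_COMPLETE_FRAME        0x80000000

int main(void)
{
    unsigned int entry = 0xC00A0040; /* made-up example word */
    unsigned int len   = entry & RX_Q_ENTRY_LENGTH_MASK;
    unsigned int chan  = (entry >> RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK;
    unsigned int flags = entry & ~(RX_Q_ENTRY_LENGTH_MASK |
                                   (RX_CHANNEL_MASK << RX_Q_ENTRY_CHANNEL_SHIFT));

    printf("channel %u, length %u, %s\n", chan, len,
           flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME) ?
           "complete frame with good CRC" : "needs further checks");
    return 0;
}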
-
-/* Offsets and constants for use with the buffer memory */
-
-/* Buffer pointers and channel types */
-
-#define BUFFER_PTR_MASK 0x0000FFFF
-#define RX_INT_THRESHOLD_MULT 0x00010000
-#define RX_INT_THRESHOLD_MASK 0x07FF
-#define INT_EVERY_N_CELLS 0x08000000
-#define CONGESTION_EXPERIENCED 0x10000000
-#define FIRST_CELL_OF_AAL5_FRAME 0x20000000
-#define CHANNEL_TYPE_AAL5 0x00000000
-#define CHANNEL_TYPE_RAW_CELLS 0x40000000
-#define CHANNEL_TYPE_AAL3_4 0x80000000
-
-/* Buffer status stuff */
-
-#define BUFF_STATUS_MASK 0x00030000
-#define BUFF_STATUS_EMPTY 0x00000000
-#define BUFF_STATUS_CELL_AV 0x00010000
-#define BUFF_STATUS_LAST_CELL_AV 0x00020000
-
-/* Transmit channel stuff */
-
-/* Receive channel stuff */
-
-#define RX_CHANNEL_DISABLED 0x00000000
-#define RX_CHANNEL_IDLE 0x00000001
-
-/* General things */
-
-#define INITIAL_CRC 0xFFFFFFFF
-
-// A Horizon u32, a byte! Really nasty. Horizon pointers are (32 bit)
-// word addresses and so standard C pointer operations break (as they
-// assume byte addresses); so we pretend that Horizon words (and word
-// pointers) are bytes (and byte pointers) for the purposes of having
-// a memory map that works.
-
-typedef u8 HDW;
-
-typedef struct cell_buf {
- HDW payload[12];
- HDW next;
- HDW cell_count; // AAL5 rx bufs
- HDW res;
- union {
- HDW partial_crc; // AAL5 rx bufs
- HDW cell_header; // RAW bufs
- } u;
-} cell_buf;
-
-typedef struct tx_ch_desc {
- HDW rd_buf_type;
- HDW wr_buf_type;
- HDW partial_crc;
- HDW cell_header;
-} tx_ch_desc;
-
-typedef struct rx_ch_desc {
- HDW wr_buf_type;
- HDW rd_buf_type;
-} rx_ch_desc;
-
-typedef struct rx_q_entry {
- HDW entry;
-} rx_q_entry;
-
-#define TX_CHANS 8
-#define RX_CHANS 1024
-#define RX_QS 1024
-#define MAX_VCS RX_CHANS
-
-/* Horizon buffer memory map */
-
-// TX Channel Descriptors 2
-// TX Initial Buffers 8 // TX_CHANS
-#define BUFN1_SIZE 118 // (126 - TX_CHANS)
-// RX/TX Start/End Buffers 4
-#define BUFN2_SIZE 124
-// RX Queue Entries 64
-#define BUFN3_SIZE 192
-// RX Channel Descriptors 128
-#define BUFN4_SIZE 1408
-// TOTAL cell_buff chunks 2048
-
-// cell_buf bufs[2048];
-// HDW dws[32768];
-
-typedef struct MEMMAP {
- tx_ch_desc tx_descs[TX_CHANS]; // 8 * 4 = 32 , 0x0020
- cell_buf inittxbufs[TX_CHANS]; // these are really
- cell_buf bufn1[BUFN1_SIZE]; // part of this pool
- cell_buf txfreebufstart;
- cell_buf txfreebufend;
- cell_buf rxfreebufstart;
- cell_buf rxfreebufend; // 8+118+1+1+1+1+124 = 254
- cell_buf bufn2[BUFN2_SIZE]; // 16 * 254 = 4064 , 0x1000
- rx_q_entry rx_q_entries[RX_QS]; // 1 * 1024 = 1024 , 0x1400
- cell_buf bufn3[BUFN3_SIZE]; // 16 * 192 = 3072 , 0x2000
- rx_ch_desc rx_descs[MAX_VCS]; // 2 * 1024 = 2048 , 0x2800
- cell_buf bufn4[BUFN4_SIZE]; // 16 * 1408 = 22528 , 0x8000
-} MEMMAP;
-
-#define memmap ((MEMMAP *)0)
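Because every field is an HDW (one byte standing in for one 32-bit word), the struct has no padding and the offsets in the comments fall out directly; sizeof(MEMMAP) is the 32768 words (128 KB / 4) that horizon.c asserts with BUILD_BUG_ON. A host-side check of the layout, using local copies of the types above:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned char HDW;

typedef struct cell_buf {
    HDW payload[12];
    HDW next;
    HDW cell_count;
    HDW res;
    union { HDW partial_crc; HDW cell_header; } u;
} cell_buf;

typedef struct tx_ch_desc { HDW rd_buf_type, wr_buf_type, partial_crc, cell_header; } tx_ch_desc;
typedef struct rx_ch_desc { HDW wr_buf_type, rd_buf_type; } rx_ch_desc;
typedef struct rx_q_entry { HDW entry; } rx_q_entry;

#define TX_CHANS   8
#define RX_QS      1024
#define MAX_VCS    1024
#define BUFN1_SIZE 118
#define BUFN2_SIZE 124
#define BUFN3_SIZE 192
#define BUFN4_SIZE 1408

typedef struct MEMMAP {
    tx_ch_desc tx_descs[TX_CHANS];
    cell_buf   inittxbufs[TX_CHANS];
    cell_buf   bufn1[BUFN1_SIZE];
    cell_buf   txfreebufstart, txfreebufend, rxfreebufstart, rxfreebufend;
    cell_buf   bufn2[BUFN2_SIZE];
    rx_q_entry rx_q_entries[RX_QS];
    cell_buf   bufn3[BUFN3_SIZE];
    rx_ch_desc rx_descs[MAX_VCS];
    cell_buf   bufn4[BUFN4_SIZE];
} MEMMAP;

int main(void)
{
    assert(sizeof(cell_buf) == 16);
    assert(offsetof(MEMMAP, inittxbufs)   == 0x0020);
    assert(offsetof(MEMMAP, rx_q_entries) == 0x1000);
    assert(offsetof(MEMMAP, bufn3)        == 0x1400);
    assert(offsetof(MEMMAP, rx_descs)     == 0x2000);
    assert(offsetof(MEMMAP, bufn4)        == 0x2800);
    assert(sizeof(MEMMAP) == 128 * 1024 / 4);
    printf("memory map layout checks out\n");
    return 0;
}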
-
-/* end horizon specific bits */
-
-typedef enum {
- aal0,
- aal34,
- aal5
-} hrz_aal;
-
-typedef enum {
- tx_busy,
- rx_busy,
- ultra
-} hrz_flags;
-
-// a single struct pointed to by atm_vcc->dev_data
-
-typedef struct {
- unsigned int tx_rate;
- unsigned int rx_rate;
- u16 channel;
- u16 tx_xbr_bits;
- u16 tx_pcr_bits;
-#if 0
- u16 tx_scr_bits;
- u16 tx_bucket_bits;
-#endif
- hrz_aal aal;
-} hrz_vcc;
-
-struct hrz_dev {
-
- u32 iobase;
- u32 * membase;
-
- struct sk_buff * rx_skb; // skb being RXed
- unsigned int rx_bytes; // bytes remaining to RX within region
- void * rx_addr; // addr to send bytes to (for PIO)
- unsigned int rx_channel; // channel that the skb is going out on
-
- struct sk_buff * tx_skb; // skb being TXed
- unsigned int tx_bytes; // bytes remaining to TX within region
- void * tx_addr; // addr to send bytes from (for PIO)
- struct iovec * tx_iovec; // remaining regions
- unsigned int tx_regions; // number of remaining regions
-
- spinlock_t mem_lock;
- wait_queue_head_t tx_queue;
-
- u8 irq;
- unsigned long flags;
- u8 tx_last;
- u8 tx_idle;
-
- rx_q_entry * rx_q_reset;
- rx_q_entry * rx_q_entry;
- rx_q_entry * rx_q_wrap;
-
- struct atm_dev * atm_dev;
-
- u32 last_vc;
-
- int noof_spare_buffers;
- u16 spare_buffers[SPARE_BUFFER_POOL_SIZE];
-
- u16 tx_channel_record[TX_CHANS];
-
- // this is what we follow when we get incoming data
- u32 txer[MAX_VCS/32];
- struct atm_vcc * rxer[MAX_VCS];
-
- // cell rate allocation
- spinlock_t rate_lock;
- unsigned int rx_avail;
- unsigned int tx_avail;
-
- // dev stats
- unsigned long tx_cell_count;
- unsigned long rx_cell_count;
- unsigned long hec_error_count;
- unsigned long unassigned_cell_count;
-
- struct pci_dev * pci_dev;
- struct timer_list housekeeping;
-};
-
-typedef struct hrz_dev hrz_dev;
-
-/* macros for use later */
-
-#define BUF_PTR(cbptr) ((cbptr) - (cell_buf *) 0)
-
-#define INTERESTING_INTERRUPTS \
- (RX_DATA_AV | RX_DISABLED | TX_BUS_MASTER_COMPLETE | RX_BUS_MASTER_COMPLETE)
-
-// 190 cells by default (192 TX buffers - 2 elbow room, see docs)
-#define TX_AAL5_LIMIT (190*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER) // 9112
-
-// Have enough RX buffers (unless we allow other buffer splits)
-#define RX_AAL5_LIMIT ATM_MAX_AAL5_PDU
-
-/* multi-statement macro protector */
-#define DW(x) do{ x } while(0)
-
-#define HRZ_DEV(atm_dev) ((hrz_dev *) (atm_dev)->dev_data)
-#define HRZ_VCC(atm_vcc) ((hrz_vcc *) (atm_vcc)->dev_data)
-
-/* Turn the LEDs on and off */
-// The LED bits are inverted: setting the bit in the debug
-// register turns the corresponding LED off.
-
-#define YELLOW_LED DEBUG_BIT_0
-#define GREEN_LED DEBUG_BIT_1
-#define YELLOW_LED_OE DEBUG_BIT_0_OE
-#define GREEN_LED_OE DEBUG_BIT_1_OE
-
-#define GREEN_LED_OFF(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | GREEN_LED)
-#define GREEN_LED_ON(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ GREEN_LED)
-#define YELLOW_LED_OFF(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | YELLOW_LED)
-#define YELLOW_LED_ON(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ YELLOW_LED)
-
-typedef enum {
- round_up,
- round_down,
- round_nearest
-} rounding;
-
-#endif /* DRIVER_ATM_HORIZON_H */
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index e0dda9062e6b..791f69a07ddf 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -14,11 +14,6 @@ typedef void __iomem *virt_addr_t;
#define CYCLE_DELAY 5
-/*
- This was the original definition
-#define osp_MicroDelay(microsec) \
- do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
-*/
#define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \
udelay((useconds));}
/*
diff --git a/drivers/atm/uPD98401.h b/drivers/atm/uPD98401.h
deleted file mode 100644
index f766a5ef0c5d..000000000000
--- a/drivers/atm/uPD98401.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/uPD98401.h - NEC uPD98401 (SAR) declarations */
-
-/* Written 1995 by Werner Almesberger, EPFL LRC */
-
-
-#ifndef DRIVERS_ATM_uPD98401_H
-#define DRIVERS_ATM_uPD98401_H
-
-
-#define MAX_CRAM_SIZE (1 << 18) /* 2^18 words */
-#define RAM_INCREMENT 1024 /* check in 4 kB increments */
-
-#define uPD98401_PORTS 0x24 /* probably more ? */
-
-
-/*
- * Commands
- */
-
-#define uPD98401_OPEN_CHAN 0x20000000 /* open channel */
-#define uPD98401_CHAN_ADDR 0x0003fff8 /* channel address */
-#define uPD98401_CHAN_ADDR_SHIFT 3
-#define uPD98401_CLOSE_CHAN 0x24000000 /* close channel */
-#define uPD98401_CHAN_RT 0x02000000 /* RX/TX (0 TX, 1 RX) */
-#define uPD98401_DEACT_CHAN 0x28000000 /* deactivate channel */
-#define uPD98401_TX_READY 0x30000000 /* TX ready */
-#define uPD98401_ADD_BAT 0x34000000 /* add batches */
-#define uPD98401_POOL 0x000f0000 /* pool number */
-#define uPD98401_POOL_SHIFT 16
-#define uPD98401_POOL_NUMBAT 0x0000ffff /* number of batches */
-#define uPD98401_NOP 0x3f000000 /* NOP */
-#define uPD98401_IND_ACC 0x00000000 /* Indirect Access */
-#define uPD98401_IA_RW 0x10000000 /* Read/Write (0 W, 1 R) */
-#define uPD98401_IA_B3 0x08000000 /* Byte select, 1 enable */
-#define uPD98401_IA_B2 0x04000000
-#define uPD98401_IA_B1 0x02000000
-#define uPD98401_IA_B0 0x01000000
-#define uPD98401_IA_BALL 0x0f000000 /* whole longword */
-#define uPD98401_IA_TGT 0x000c0000 /* Target */
-#define uPD98401_IA_TGT_SHIFT 18
-#define uPD98401_IA_TGT_CM 0 /* - Control Memory */
-#define uPD98401_IA_TGT_SAR 1 /* - uPD98401 registers */
-#define uPD98401_IA_TGT_PHY 3 /* - PHY device */
-#define uPD98401_IA_ADDR 0x0003ffff
-
-/*
- * Command Register Status
- */
-
-#define uPD98401_BUSY 0x80000000 /* SAR is busy */
-#define uPD98401_LOCKED 0x40000000 /* SAR is locked by other CPU */
-
-/*
- * Indications
- */
-
-/* Normal (AAL5) Receive Indication */
-#define uPD98401_AAL5_UINFO 0xffff0000 /* user-supplied information */
-#define uPD98401_AAL5_UINFO_SHIFT 16
-#define uPD98401_AAL5_SIZE 0x0000ffff /* PDU size (in _CELLS_ !!) */
-#define uPD98401_AAL5_CHAN 0x7fff0000 /* Channel number */
-#define uPD98401_AAL5_CHAN_SHIFT 16
-#define uPD98401_AAL5_ERR 0x00008000 /* Error indication */
-#define uPD98401_AAL5_CI 0x00004000 /* Congestion Indication */
-#define uPD98401_AAL5_CLP 0x00002000 /* CLP (>= 1 cell had CLP=1) */
-#define uPD98401_AAL5_ES 0x00000f00 /* Error Status */
-#define uPD98401_AAL5_ES_SHIFT 8
-#define uPD98401_AAL5_ES_NONE 0 /* No error */
-#define uPD98401_AAL5_ES_FREE 1 /* Receiver free buf underflow */
-#define uPD98401_AAL5_ES_FIFO 2 /* Receiver FIFO overrun */
-#define uPD98401_AAL5_ES_TOOBIG 3 /* Maximum length violation */
-#define uPD98401_AAL5_ES_CRC 4 /* CRC error */
-#define uPD98401_AAL5_ES_ABORT 5 /* User abort */
-#define uPD98401_AAL5_ES_LENGTH 6 /* Length violation */
-#define uPD98401_AAL5_ES_T1 7 /* T1 error (timeout) */
-#define uPD98401_AAL5_ES_DEACT 8 /* Deactivated with DEACT_CHAN */
-#define uPD98401_AAL5_POOL 0x0000001f /* Free buffer pool number */
-
-/* Raw Cell Indication */
-#define uPD98401_RAW_UINFO uPD98401_AAL5_UINFO
-#define uPD98401_RAW_UINFO_SHIFT uPD98401_AAL5_UINFO_SHIFT
-#define uPD98401_RAW_HEC 0x000000ff /* HEC */
-#define uPD98401_RAW_CHAN uPD98401_AAL5_CHAN
-#define uPD98401_RAW_CHAN_SHIFT uPD98401_AAL5_CHAN_SHIFT
-
-/* Transmit Indication */
-#define uPD98401_TXI_CONN 0x7fff0000 /* Connection Number */
-#define uPD98401_TXI_CONN_SHIFT 16
-#define uPD98401_TXI_ACTIVE 0x00008000 /* Channel remains active */
-#define uPD98401_TXI_PQP 0x00007fff /* Packet Queue Pointer */
-
-/*
- * Directly Addressable Registers
- */
-
-#define uPD98401_GMR 0x00 /* General Mode Register */
-#define uPD98401_GSR 0x01 /* General Status Register */
-#define uPD98401_IMR 0x02 /* Interrupt Mask Register */
-#define uPD98401_RQU 0x03 /* Receive Queue Underrun */
-#define uPD98401_RQA 0x04 /* Receive Queue Alert */
-#define uPD98401_ADDR 0x05 /* Last Burst Address */
-#define uPD98401_VER 0x06 /* Version Number */
-#define uPD98401_SWR 0x07 /* Software Reset */
-#define uPD98401_CMR 0x08 /* Command Register */
-#define uPD98401_CMR_L 0x09 /* Command Register and Lock/Unlock */
-#define uPD98401_CER 0x0a /* Command Extension Register */
-#define uPD98401_CER_L 0x0b /* Command Ext Reg and Lock/Unlock */
-
-#define uPD98401_MSH(n) (0x10+(n)) /* Mailbox n Start Address High */
-#define uPD98401_MSL(n) (0x14+(n)) /* Mailbox n Start Address Low */
-#define uPD98401_MBA(n) (0x18+(n)) /* Mailbox n Bottom Address */
-#define uPD98401_MTA(n) (0x1c+(n)) /* Mailbox n Tail Address */
-#define uPD98401_MWA(n) (0x20+(n)) /* Mailbox n Write Address */
-
-/* GMR is at 0x00 */
-#define uPD98401_GMR_ONE 0x80000000 /* Must be set to one */
-#define uPD98401_GMR_SLM 0x40000000 /* Address mode (0 word, 1 byte) */
-#define uPD98401_GMR_CPE 0x00008000 /* Control Memory Parity Enable */
-#define uPD98401_GMR_LP 0x00004000 /* Loopback */
-#define uPD98401_GMR_WA 0x00002000 /* Early Bus Write Abort/RDY */
-#define uPD98401_GMR_RA 0x00001000 /* Early Read Abort/RDY */
-#define uPD98401_GMR_SZ 0x00000f00 /* Burst Size Enable */
-#define uPD98401_BURST16 0x00000800 /* 16-word burst */
-#define uPD98401_BURST8 0x00000400 /* 8-word burst */
-#define uPD98401_BURST4 0x00000200 /* 4-word burst */
-#define uPD98401_BURST2 0x00000100 /* 2-word burst */
-#define uPD98401_GMR_AD 0x00000080 /* Address (burst resolution) Disable */
-#define uPD98401_GMR_BO 0x00000040 /* Byte Order (0 little, 1 big) */
-#define uPD98401_GMR_PM 0x00000020 /* Bus Parity Mode (0 byte, 1 word)*/
-#define uPD98401_GMR_PC 0x00000010 /* Bus Parity Control (0even,1odd) */
-#define uPD98401_GMR_BPE 0x00000008 /* Bus Parity Enable */
-#define uPD98401_GMR_DR 0x00000004 /* Receive Drop Mode (0drop,1don't)*/
-#define uPD98401_GMR_SE 0x00000002 /* Shapers Enable */
-#define uPD98401_GMR_RE 0x00000001 /* Receiver Enable */
-
-/* GSR is at 0x01, IMR is at 0x02 */
-#define uPD98401_INT_PI 0x80000000 /* PHY interrupt */
-#define uPD98401_INT_RQA 0x40000000 /* Receive Queue Alert */
-#define uPD98401_INT_RQU 0x20000000 /* Receive Queue Underrun */
-#define uPD98401_INT_RD 0x10000000 /* Receiver Deactivated */
-#define uPD98401_INT_SPE 0x08000000 /* System Parity Error */
-#define uPD98401_INT_CPE 0x04000000 /* Control Memory Parity Error */
-#define uPD98401_INT_SBE 0x02000000 /* System Bus Error */
-#define uPD98401_INT_IND 0x01000000 /* Initialization Done */
-#define uPD98401_INT_RCR 0x0000ff00 /* Raw Cell Received */
-#define uPD98401_INT_RCR_SHIFT 8
-#define uPD98401_INT_MF 0x000000f0 /* Mailbox Full */
-#define uPD98401_INT_MF_SHIFT 4
-#define uPD98401_INT_MM 0x0000000f /* Mailbox Modified */
-
-/* VER is at 0x06 */
-#define uPD98401_MAJOR 0x0000ff00 /* Major revision */
-#define uPD98401_MAJOR_SHIFT 8
-#define uPD98401_MINOR 0x000000ff /* Minor revision */
-
-/*
- * Indirectly Addressable Registers
- */
-
-#define uPD98401_IM(n) (0x40000+(n)) /* Scheduler n I and M */
-#define uPD98401_X(n) (0x40010+(n)) /* Scheduler n X */
-#define uPD98401_Y(n) (0x40020+(n)) /* Scheduler n Y */
-#define uPD98401_PC(n) (0x40030+(n)) /* Scheduler n P, C, p and c */
-#define uPD98401_PS(n) (0x40040+(n)) /* Scheduler n priority and status */
-
-/* IM contents */
-#define uPD98401_IM_I 0xff000000 /* I */
-#define uPD98401_IM_I_SHIFT 24
-#define uPD98401_IM_M 0x00ffffff /* M */
-
-/* PC contents */
-#define uPD98401_PC_P 0xff000000 /* P */
-#define uPD98401_PC_P_SHIFT 24
-#define uPD98401_PC_C 0x00ff0000 /* C */
-#define uPD98401_PC_C_SHIFT 16
-#define uPD98401_PC_p 0x0000ff00 /* p */
-#define uPD98401_PC_p_SHIFT 8
-#define uPD98401_PC_c 0x000000ff /* c */
-
-/* PS contents */
-#define uPD98401_PS_PRIO 0xf0 /* Priority level (0 high, 15 low) */
-#define uPD98401_PS_PRIO_SHIFT 4
-#define uPD98401_PS_S 0x08 /* Scan - must be 0 (internal) */
-#define uPD98401_PS_R 0x04 /* Round Robin (internal) */
-#define uPD98401_PS_A 0x02 /* Active (internal) */
-#define uPD98401_PS_E 0x01 /* Enabled */
-
-#define uPD98401_TOS 0x40100 /* Top of Stack Control Memory Address */
-#define uPD98401_SMA 0x40200 /* Shapers Control Memory Start Address */
-#define uPD98401_PMA 0x40201 /* Receive Pool Control Memory Start Address */
-#define uPD98401_T1R 0x40300 /* T1 Register */
-#define uPD98401_VRR 0x40301 /* VPI/VCI Reduction Register/Recv. Shutdown */
-#define uPD98401_TSR 0x40302 /* Time-Stamp Register */
-
-/* VRR is at 0x40301 */
-#define uPD98401_VRR_SDM 0x80000000 /* Shutdown Mode */
-#define uPD98401_VRR_SHIFT 0x000f0000 /* VPI/VCI Shift */
-#define uPD98401_VRR_SHIFT_SHIFT 16
-#define uPD98401_VRR_MASK 0x0000ffff /* VPI/VCI mask */
-
-/*
- * TX packet descriptor
- */
-
-#define uPD98401_TXPD_SIZE 16 /* descriptor size (in bytes) */
-
-#define uPD98401_TXPD_V 0x80000000 /* Valid bit */
-#define uPD98401_TXPD_DP 0x40000000 /* Descriptor (1) or Pointer (0) */
-#define uPD98401_TXPD_SM 0x20000000 /* Single (1) or Multiple (0) */
-#define uPD98401_TXPD_CLPM 0x18000000 /* CLP mode */
-#define uPD98401_CLPM_0 0 /* 00 CLP = 0 */
-#define uPD98401_CLPM_1 3 /* 11 CLP = 1 */
-#define uPD98401_CLPM_LAST 1 /* 01 CLP unless last cell */
-#define uPD98401_TXPD_CLPM_SHIFT 27
-#define uPD98401_TXPD_PTI 0x07000000 /* PTI pattern */
-#define uPD98401_TXPD_PTI_SHIFT 24
-#define uPD98401_TXPD_GFC 0x00f00000 /* GFC pattern */
-#define uPD98401_TXPD_GFC_SHIFT 20
-#define uPD98401_TXPD_C10 0x00040000 /* insert CRC-10 */
-#define uPD98401_TXPD_AAL5 0x00020000 /* AAL5 processing */
-#define uPD98401_TXPD_MB 0x00010000 /* TX mailbox number */
-#define uPD98401_TXPD_UU 0x0000ff00 /* CPCS-UU */
-#define uPD98401_TXPD_UU_SHIFT 8
-#define uPD98401_TXPD_CPI 0x000000ff /* CPI */
-
-/*
- * TX buffer descriptor
- */
-
-#define uPD98401_TXBD_SIZE 8 /* descriptor size (in bytes) */
-
-#define uPD98401_TXBD_LAST 0x80000000 /* last buffer in packet */
-
-/*
- * TX VC table
- */
-
-/* 1st word has the same structure as in a TX packet descriptor */
-#define uPD98401_TXVC_L 0x80000000 /* last buffer */
-#define uPD98401_TXVC_SHP 0x0f000000 /* shaper number */
-#define uPD98401_TXVC_SHP_SHIFT 24
-#define uPD98401_TXVC_VPI 0x00ff0000 /* VPI */
-#define uPD98401_TXVC_VPI_SHIFT 16
-#define uPD98401_TXVC_VCI 0x0000ffff /* VCI */
-#define uPD98401_TXVC_QRP 6 /* Queue Read Pointer is in word 6 */
-
-/*
- * RX free buffer pools descriptor
- */
-
-#define uPD98401_RXFP_ALERT 0x70000000 /* low water mark */
-#define uPD98401_RXFP_ALERT_SHIFT 28
-#define uPD98401_RXFP_BFSZ 0x0f000000 /* buffer size, 64*2^n */
-#define uPD98401_RXFP_BFSZ_SHIFT 24
-#define uPD98401_RXFP_BTSZ 0x00ff0000 /* batch size, n+1 */
-#define uPD98401_RXFP_BTSZ_SHIFT 16
-#define uPD98401_RXFP_REMAIN 0x0000ffff /* remaining batches in pool */
-
-/*
- * RX VC table
- */
-
-#define uPD98401_RXVC_BTSZ 0xff000000 /* remaining free buffers in batch */
-#define uPD98401_RXVC_BTSZ_SHIFT 24
-#define uPD98401_RXVC_MB 0x00200000 /* RX mailbox number */
-#define uPD98401_RXVC_POOL 0x001f0000 /* free buffer pool number */
-#define uPD98401_RXVC_POOL_SHIFT 16
-#define uPD98401_RXVC_UINFO 0x0000ffff /* user-supplied information */
-#define uPD98401_RXVC_T1 0xffff0000 /* T1 timestamp */
-#define uPD98401_RXVC_T1_SHIFT 16
-#define uPD98401_RXVC_PR 0x00008000 /* Packet Reception, 1 if busy */
-#define uPD98401_RXVC_DR 0x00004000 /* FIFO Drop */
-#define uPD98401_RXVC_OD 0x00001000 /* Drop OAM cells */
-#define uPD98401_RXVC_AR 0x00000800 /* AAL5 or raw cell; 1 if AAL5 */
-#define uPD98401_RXVC_MAXSEG 0x000007ff /* max number of segments per PDU */
-#define uPD98401_RXVC_REM 0xfffe0000 /* remaining words in curr buffer */
-#define uPD98401_RXVC_REM_SHIFT 17
-#define uPD98401_RXVC_CLP 0x00010000 /* CLP received */
-#define uPD98401_RXVC_BFA 0x00008000 /* Buffer Assigned */
-#define uPD98401_RXVC_BTA 0x00004000 /* Batch Assigned */
-#define uPD98401_RXVC_CI 0x00002000 /* Congestion Indication */
-#define uPD98401_RXVC_DD 0x00001000 /* Dropping incoming cells */
-#define uPD98401_RXVC_DP 0x00000800 /* like PR ? */
-#define uPD98401_RXVC_CURSEG 0x000007ff /* Current Segment count */
-
-/*
- * RX lookup table
- */
-
-#define uPD98401_RXLT_ENBL 0x8000 /* Enable */
-
-#endif
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
deleted file mode 100644
index 239852d85558..000000000000
--- a/drivers/atm/uPD98402.c
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/atm/uPD98402.c - NEC uPD98402 (PHY) declarations */
-
-/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/atomic.h>
-
-#include "uPD98402.h"
-
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-
-struct uPD98402_priv {
- struct k_sonet_stats sonet_stats;/* link diagnostics */
- unsigned char framing; /* SONET/SDH framing */
- int loop_mode; /* loopback mode */
- spinlock_t lock;
-};
-
-
-#define PRIV(dev) ((struct uPD98402_priv *) dev->phy_data)
-
-#define PUT(val,reg) dev->ops->phy_put(dev,val,uPD98402_##reg)
-#define GET(reg) dev->ops->phy_get(dev,uPD98402_##reg)
-
-
-static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int zero)
-{
- struct sonet_stats tmp;
- int error = 0;
-
- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
- sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
- if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
- if (zero && !error) {
- /* unused fields are reported as -1, but we must not "adjust"
- them */
- tmp.corr_hcs = tmp.tx_cells = tmp.rx_cells = 0;
- sonet_subtract_stats(&PRIV(dev)->sonet_stats,&tmp);
- }
- return error ? -EFAULT : 0;
-}
-
-
-static int set_framing(struct atm_dev *dev,unsigned char framing)
-{
- static const unsigned char sonet[] = { 1,2,3,0 };
- static const unsigned char sdh[] = { 1,0,0,2 };
- const char *set;
- unsigned long flags;
-
- switch (framing) {
- case SONET_FRAME_SONET:
- set = sonet;
- break;
- case SONET_FRAME_SDH:
- set = sdh;
- break;
- default:
- return -EINVAL;
- }
- spin_lock_irqsave(&PRIV(dev)->lock, flags);
- PUT(set[0],C11T);
- PUT(set[1],C12T);
- PUT(set[2],C13T);
- PUT((GET(MDR) & ~uPD98402_MDR_SS_MASK) | (set[3] <<
- uPD98402_MDR_SS_SHIFT),MDR);
- spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
- return 0;
-}
-
-
-static int get_sense(struct atm_dev *dev,u8 __user *arg)
-{
- unsigned long flags;
- unsigned char s[3];
-
- spin_lock_irqsave(&PRIV(dev)->lock, flags);
- s[0] = GET(C11R);
- s[1] = GET(C12R);
- s[2] = GET(C13R);
- spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
- return (put_user(s[0], arg) || put_user(s[1], arg+1) ||
- put_user(s[2], arg+2) || put_user(0xff, arg+3) ||
- put_user(0xff, arg+4) || put_user(0xff, arg+5)) ? -EFAULT : 0;
-}
-
-
-static int set_loopback(struct atm_dev *dev,int mode)
-{
- unsigned char mode_reg;
-
- mode_reg = GET(MDR) & ~(uPD98402_MDR_TPLP | uPD98402_MDR_ALP |
- uPD98402_MDR_RPLP);
- switch (__ATM_LM_XTLOC(mode)) {
- case __ATM_LM_NONE:
- break;
- case __ATM_LM_PHY:
- mode_reg |= uPD98402_MDR_TPLP;
- break;
- case __ATM_LM_ATM:
- mode_reg |= uPD98402_MDR_ALP;
- break;
- default:
- return -EINVAL;
- }
- switch (__ATM_LM_XTRMT(mode)) {
- case __ATM_LM_NONE:
- break;
- case __ATM_LM_PHY:
- mode_reg |= uPD98402_MDR_RPLP;
- break;
- default:
- return -EINVAL;
- }
- PUT(mode_reg,MDR);
- PRIV(dev)->loop_mode = mode;
- return 0;
-}
-
-
-static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- switch (cmd) {
-
- case SONET_GETSTATZ:
- case SONET_GETSTAT:
- return fetch_stats(dev,arg, cmd == SONET_GETSTATZ);
- case SONET_SETFRAMING:
- return set_framing(dev, (int)(unsigned long)arg);
- case SONET_GETFRAMING:
- return put_user(PRIV(dev)->framing,(int __user *)arg) ?
- -EFAULT : 0;
- case SONET_GETFRSENSE:
- return get_sense(dev,arg);
- case ATM_SETLOOP:
- return set_loopback(dev, (int)(unsigned long)arg);
- case ATM_GETLOOP:
- return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ?
- -EFAULT : 0;
- case ATM_QUERYLOOP:
- return put_user(ATM_LM_LOC_PHY | ATM_LM_LOC_ATM |
- ATM_LM_RMT_PHY,(int __user *)arg) ? -EFAULT : 0;
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-
-#define ADD_LIMITED(s,v) \
- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
-
-
-static void stat_event(struct atm_dev *dev)
-{
- unsigned char events;
-
- events = GET(PCR);
- if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB);
- if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT);
- if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT);
- if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT);
- if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT);
-}
-
-
-#undef ADD_LIMITED
-
-
-static void uPD98402_int(struct atm_dev *dev)
-{
- static unsigned long silence = 0;
- unsigned char reason;
-
- while ((reason = GET(PICR))) {
- if (reason & uPD98402_INT_LOS)
- printk(KERN_NOTICE "%s(itf %d): signal lost\n",
- dev->type,dev->number);
- if (reason & uPD98402_INT_PFM) stat_event(dev);
- if (reason & uPD98402_INT_PCO) {
- (void) GET(PCOCR); /* clear interrupt cause */
- atomic_add(GET(HECCT),
- &PRIV(dev)->sonet_stats.uncorr_hcs);
- }
- if ((reason & uPD98402_INT_RFO) &&
- (time_after(jiffies, silence) || silence == 0)) {
- printk(KERN_WARNING "%s(itf %d): uPD98402 receive "
- "FIFO overflow\n",dev->type,dev->number);
- silence = (jiffies+HZ/2)|1;
- }
- }
-}
-
-
-static int uPD98402_start(struct atm_dev *dev)
-{
- DPRINTK("phy_start\n");
- if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
- return -ENOMEM;
- spin_lock_init(&PRIV(dev)->lock);
- memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
- (void) GET(PCR); /* clear performance events */
- PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */
- (void) GET(PCOCR); /* clear overflows */
- PUT(~uPD98402_PCO_HECC,PCOMR);
- (void) GET(PICR); /* clear interrupts */
- PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
- uPD98402_INT_LOS),PIMR); /* enable them */
- (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
- return 0;
-}
-
-
-static int uPD98402_stop(struct atm_dev *dev)
-{
- /* let SAR driver worry about stopping interrupts */
- kfree(PRIV(dev));
- return 0;
-}
-
-
-static const struct atmphy_ops uPD98402_ops = {
- .start = uPD98402_start,
- .ioctl = uPD98402_ioctl,
- .interrupt = uPD98402_int,
- .stop = uPD98402_stop,
-};
-
-
-int uPD98402_init(struct atm_dev *dev)
-{
-DPRINTK("phy_init\n");
- dev->phy = &uPD98402_ops;
- return 0;
-}
-
-
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(uPD98402_init);
-
-static __init int uPD98402_module_init(void)
-{
- return 0;
-}
-module_init(uPD98402_module_init);
-/* module_exit not defined so not unloadable */
diff --git a/drivers/atm/uPD98402.h b/drivers/atm/uPD98402.h
deleted file mode 100644
index 437cfaa20c96..000000000000
--- a/drivers/atm/uPD98402.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/uPD98402.h - NEC uPD98402 (PHY) declarations */
-
-/* Written 1995 by Werner Almesberger, EPFL LRC */
-
-
-#ifndef DRIVERS_ATM_uPD98402_H
-#define DRIVERS_ATM_uPD98402_H
-
-/*
- * Registers
- */
-
-#define uPD98402_CMR 0x00 /* Command Register */
-#define uPD98402_MDR 0x01 /* Mode Register */
-#define uPD98402_PICR 0x02 /* PHY Interrupt Cause Register */
-#define uPD98402_PIMR 0x03 /* PHY Interrupt Mask Register */
-#define uPD98402_ACR 0x04 /* Alarm Cause Register */
-#define uPD98402_ACMR 0x05 /* Alarm Cause Mask Register */
-#define uPD98402_PCR 0x06 /* Performance Cause Register */
-#define uPD98402_PCMR 0x07 /* Performance Cause Mask Register */
-#define uPD98402_IACM 0x08 /* Internal Alarm Cause Mask Register */
-#define uPD98402_B1ECT 0x09 /* B1 Error Count Register */
-#define uPD98402_B2ECT 0x0a /* B2 Error Count Register */
-#define uPD98402_B3ECT 0x0b /* B3 Error Count Register */
-#define uPD98402_PFECB 0x0c /* Path FEBE Count Register */
-#define uPD98402_LECCT 0x0d /* Line FEBE Count Register */
-#define uPD98402_HECCT 0x0e /* HEC Error Count Register */
-#define uPD98402_FJCT 0x0f /* Frequency Justification Count Reg */
-#define uPD98402_PCOCR 0x10 /* Perf. Counter Overflow Cause Reg */
-#define uPD98402_PCOMR 0x11 /* Perf. Counter Overflow Mask Reg */
-#define uPD98402_C11T 0x20 /* C11T Data Register */
-#define uPD98402_C12T 0x21 /* C12T Data Register */
-#define uPD98402_C13T 0x22 /* C13T Data Register */
-#define uPD98402_F1T 0x23 /* F1T Data Register */
-#define uPD98402_K2T 0x25 /* K2T Data Register */
-#define uPD98402_C2T 0x26 /* C2T Data Register */
-#define uPD98402_F2T 0x27 /* F2T Data Register */
-#define uPD98402_C11R 0x30 /* C11R Data Register */
-#define uPD98402_C12R 0x31 /* C12R Data Register */
-#define uPD98402_C13R 0x32 /* C13R Data Register */
-#define uPD98402_F1R 0x33 /* F1R Data Register */
-#define uPD98402_K2R 0x35 /* K2R Data Register */
-#define uPD98402_C2R 0x36 /* C2R Data Register */
-#define uPD98402_F2R 0x37 /* F2R Data Register */
-
-/* CMR is at 0x00 */
-#define uPD98402_CMR_PFRF 0x01 /* Send path FERF */
-#define uPD98402_CMR_LFRF 0x02 /* Send line FERF */
-#define uPD98402_CMR_PAIS 0x04 /* Send path AIS */
-#define uPD98402_CMR_LAIS 0x08 /* Send line AIS */
-
-/* MDR is at 0x01 */
-#define uPD98402_MDR_ALP 0x01 /* ATM layer loopback */
-#define uPD98402_MDR_TPLP 0x02 /* PMD loopback, to host */
-#define uPD98402_MDR_RPLP 0x04 /* PMD loopback, to network */
-#define uPD98402_MDR_SS0 0x08 /* SS0 */
-#define uPD98402_MDR_SS1 0x10 /* SS1 */
-#define uPD98402_MDR_SS_MASK 0x18 /* mask */
-#define uPD98402_MDR_SS_SHIFT 3 /* shift */
-#define uPD98402_MDR_HEC 0x20 /* disable HEC inbound processing */
-#define uPD98402_MDR_FSR 0x40 /* disable frame scrambler */
-#define uPD98402_MDR_CSR 0x80 /* disable cell scrambler */
-
-/* PICR is at 0x02, PIMR is at 0x03 */
-#define uPD98402_INT_PFM 0x01 /* performance counter has changed */
-#define uPD98402_INT_ALM 0x02 /* line fault */
-#define uPD98402_INT_RFO 0x04 /* receive FIFO overflow */
-#define uPD98402_INT_PCO 0x08 /* performance counter overflow */
-#define uPD98402_INT_OTD 0x20 /* OTD has occurred */
-#define uPD98402_INT_LOS 0x40 /* Loss Of Signal */
-#define uPD98402_INT_LOF 0x80 /* Loss Of Frame */
-
-/* ACR is as 0x04, ACMR is at 0x05 */
-#define uPD98402_ALM_PFRF 0x01 /* path FERF */
-#define uPD98402_ALM_LFRF 0x02 /* line FERF */
-#define uPD98402_ALM_PAIS 0x04 /* path AIS */
-#define uPD98402_ALM_LAIS 0x08 /* line AIS */
-#define uPD98402_ALM_LOD 0x10 /* loss of delineation */
-#define uPD98402_ALM_LOP 0x20 /* loss of pointer */
-#define uPD98402_ALM_OOF 0x40 /* out of frame */
-
-/* PCR is at 0x06, PCMR is at 0x07 */
-#define uPD98402_PFM_PFEB 0x01 /* path FEBE */
-#define uPD98402_PFM_LFEB 0x02 /* line FEBE */
-#define uPD98402_PFM_B3E 0x04 /* B3 error */
-#define uPD98402_PFM_B2E 0x08 /* B2 error */
-#define uPD98402_PFM_B1E 0x10 /* B1 error */
-#define uPD98402_PFM_FJ 0x20 /* frequency justification */
-
-/* IACM is at 0x08 */
-#define uPD98402_IACM_PFRF 0x01 /* don't generate path FERF */
-#define uPD98402_IACM_LFRF 0x02 /* don't generate line FERF */
-
-/* PCOCR is at 0x010, PCOMR is at 0x11 */
-#define uPD98402_PCO_B1EC 0x01 /* B1ECT overflow */
-#define uPD98402_PCO_B2EC 0x02 /* B2ECT overflow */
-#define uPD98402_PCO_B3EC 0x04 /* B3ECT overflow */
-#define uPD98402_PCO_PFBC 0x08 /* PFEBC overflow */
-#define uPD98402_PCO_LFBC 0x10 /* LFEVC overflow */
-#define uPD98402_PCO_HECC 0x20 /* HECCT overflow */
-#define uPD98402_PCO_FJC 0x40 /* FJCT overflow */
-
-
-int uPD98402_init(struct atm_dev *dev);
-
-#endif
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
deleted file mode 100644
index cf5fffcf98a1..000000000000
--- a/drivers/atm/zatm.c
+++ /dev/null
@@ -1,1652 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */
-
-/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/atm_zatm.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/nospec.h>
-
-#include "uPD98401.h"
-#include "uPD98402.h"
-#include "zeprom.h"
-#include "zatm.h"
-
-
-/*
- * TODO:
- *
- * Minor features
- * - support 64 kB SDUs (will have to use multibuffer batches then :-( )
- * - proper use of CDV, credit = max(1,CDVT*PCR)
- * - AAL0
- * - better receive timestamps
- * - OAM
- */
-
-#define ZATM_COPPER 1
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-#ifndef CONFIG_ATM_ZATM_DEBUG
-
-
-#define NULLCHECK(x)
-
-#define EVENT(s,a,b)
-
-
-static void event_dump(void)
-{
-}
-
-
-#else
-
-
-/*
- * NULL pointer checking
- */
-
-#define NULLCHECK(x) \
- if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x))
-
-/*
- * Very extensive activity logging. Greatly improves bug detection speed but
- * costs a few Mbps if enabled.
- */
-
-#define EV 64
-
-static const char *ev[EV];
-static unsigned long ev_a[EV],ev_b[EV];
-static int ec = 0;
-
-
-static void EVENT(const char *s,unsigned long a,unsigned long b)
-{
- ev[ec] = s;
- ev_a[ec] = a;
- ev_b[ec] = b;
- ec = (ec+1) % EV;
-}
-
-
-static void event_dump(void)
-{
- int n,i;
-
- printk(KERN_NOTICE "----- event dump follows -----\n");
- for (n = 0; n < EV; n++) {
- i = (ec+n) % EV;
- printk(KERN_NOTICE);
- printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]);
- }
- printk(KERN_NOTICE "----- event dump ends here -----\n");
-}
-
-
-#endif /* CONFIG_ATM_ZATM_DEBUG */
-
-
-#define RING_BUSY 1 /* indication from do_tx that PDU has to be
- backlogged */
-
-static struct atm_dev *zatm_boards = NULL;
-static unsigned long dummy[2] = {0,0};
-
-
-#define zin_n(r) inl(zatm_dev->base+r*4)
-#define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
-#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
-#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
-
-/* RX0, RX1, TX0, TX1 */
-static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
-static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
-
-#define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i])
-
-
-/*-------------------------------- utilities --------------------------------*/
-
-
-static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
-{
- zwait();
- zout(value,CER);
- zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
- (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-}
-
-
-static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
-{
- zwait();
- zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
- (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait();
- return zin(CER);
-}
-
-
-/*------------------------------- free lists --------------------------------*/
-
-
-/*
- * Free buffer head structure:
- * [0] pointer to buffer (for SAR)
- * [1] buffer descr link pointer (for SAR)
- * [2] back pointer to skb (for poll_rx)
- * [3] data
- * ...
- */
-
-struct rx_buffer_head {
- u32 buffer; /* pointer to buffer (for SAR) */
- u32 link; /* buffer descriptor link pointer (for SAR) */
- struct sk_buff *skb; /* back pointer to skb (for poll_rx) */
-};
-
-
-static void refill_pool(struct atm_dev *dev,int pool)
-{
- struct zatm_dev *zatm_dev;
- struct sk_buff *skb;
- struct rx_buffer_head *first;
- unsigned long flags;
- int align,offset,free,count,size;
-
- EVENT("refill_pool\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :
- pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);
- if (size < PAGE_SIZE) {
- align = 32; /* for 32 byte alignment */
- offset = sizeof(struct rx_buffer_head);
- }
- else {
- align = 4096;
- offset = zatm_dev->pool_info[pool].offset+
- sizeof(struct rx_buffer_head);
- }
- size += align;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &
- uPD98401_RXFP_REMAIN;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- if (free >= zatm_dev->pool_info[pool].low_water) return;
- EVENT("starting ... POOL: 0x%x, 0x%x\n",
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
- EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- count = 0;
- first = NULL;
- while (free < zatm_dev->pool_info[pool].high_water) {
- struct rx_buffer_head *head;
-
- skb = alloc_skb(size,GFP_ATOMIC);
- if (!skb) {
- printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new "
- "skb (%d) with %d free\n",dev->number,size,free);
- break;
- }
- skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+
- align+offset-1) & ~(unsigned long) (align-1))-offset)-
- skb->data);
- head = (struct rx_buffer_head *) skb->data;
- skb_reserve(skb,sizeof(struct rx_buffer_head));
- if (!first) first = head;
- count++;
- head->buffer = virt_to_bus(skb->data);
- head->link = 0;
- head->skb = skb;
- EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb,
- (unsigned long) head);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- if (zatm_dev->last_free[pool])
- ((struct rx_buffer_head *) (zatm_dev->last_free[pool]->
- data))[-1].link = virt_to_bus(head);
- zatm_dev->last_free[pool] = skb;
- skb_queue_tail(&zatm_dev->pool[pool],skb);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- free++;
- }
- if (first) {
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(virt_to_bus(first),CER);
- zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
- CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- EVENT ("POOL: 0x%x, 0x%x\n",
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
- EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- }
-}
-
-
-static void drain_free(struct atm_dev *dev,int pool)
-{
- skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
-}
-
-
-static int pool_index(int max_pdu)
-{
- int i;
-
- if (max_pdu % ATM_CELL_PAYLOAD)
- printk(KERN_ERR DEV_LABEL ": driver error in pool_index: "
- "max_pdu is %d\n",max_pdu);
- if (max_pdu > 65536) return -1;
- for (i = 0; (64 << i) < max_pdu; i++);
- return i+ZATM_AAL5_POOL_BASE;
-}
-
-
-/* use_pool isn't reentrant */
-
-
-static void use_pool(struct atm_dev *dev,int pool)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
- int size;
-
- zatm_dev = ZATM_DEV(dev);
- if (!(zatm_dev->pool_info[pool].ref_count++)) {
- skb_queue_head_init(&zatm_dev->pool[pool]);
- size = pool-ZATM_AAL5_POOL_BASE;
- if (size < 0) size = 0; /* 64B... */
- else if (size > 10) size = 10; /* ... 64kB */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
- uPD98401_RXFP_ALERT_SHIFT) |
- (1 << uPD98401_RXFP_BTSZ_SHIFT) |
- (size << uPD98401_RXFP_BFSZ_SHIFT),
- zatm_dev->pool_base+pool*2);
- zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+
- pool*2+1);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->last_free[pool] = NULL;
- refill_pool(dev,pool);
- }
- DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
-}
-
-
-static void unuse_pool(struct atm_dev *dev,int pool)
-{
- if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count))
- drain_free(dev,pool);
-}
-
-/*----------------------------------- RX ------------------------------------*/
-
-
-#if 0
-static void exception(struct atm_vcc *vcc)
-{
- static int count = 0;
- struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev);
- struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc);
- unsigned long *qrp;
- int i;
-
- if (count++ > 2) return;
- for (i = 0; i < 8; i++)
- printk("TX%d: 0x%08lx\n",i,
- zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i));
- for (i = 0; i < 5; i++)
- printk("SH%d: 0x%08lx\n",i,
- zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i));
- qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
- uPD98401_TXVC_QRP);
- printk("qrp=0x%08lx\n",(unsigned long) qrp);
- for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]);
-}
-#endif
-
-
-static const char *err_txt[] = {
- "No error",
- "RX buf underflow",
- "RX FIFO overrun",
- "Maximum len violation",
- "CRC error",
- "User abort",
- "Length violation",
- "T1 error",
- "Deactivated",
- "???",
- "???",
- "???",
- "???",
- "???",
- "???",
- "???"
-};
-
-
-static void poll_rx(struct atm_dev *dev,int mbx)
-{
- struct zatm_dev *zatm_dev;
- unsigned long pos;
- u32 x;
- int error;
-
- EVENT("poll_rx\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
- while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
- u32 *here;
- struct sk_buff *skb;
- struct atm_vcc *vcc;
- int cells,size,chan;
-
- EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
- here = (u32 *) pos;
- if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
- pos = zatm_dev->mbx_start[mbx];
- cells = here[0] & uPD98401_AAL5_SIZE;
-#if 0
-printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
-{
-unsigned long *x;
- printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
- zatm_dev->pool_base),
- zpeekl(zatm_dev,zatm_dev->pool_base+1));
- x = (unsigned long *) here[2];
- printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
- x[0],x[1],x[2],x[3]);
-}
-#endif
- error = 0;
- if (here[3] & uPD98401_AAL5_ERR) {
- error = (here[3] & uPD98401_AAL5_ES) >>
- uPD98401_AAL5_ES_SHIFT;
- if (error == uPD98401_AAL5_ES_DEACT ||
- error == uPD98401_AAL5_ES_FREE) continue;
- }
-EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
- uPD98401_AAL5_ES_SHIFT,error);
- skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
- __net_timestamp(skb);
-#if 0
-printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
- ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
- ((unsigned *) skb->data)[0]);
-#endif
- EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
- (unsigned long) here);
-#if 0
-printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
-#endif
- size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
- ATM_CELL_PAYLOAD/sizeof(u16)-3]);
- EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
- chan = (here[3] & uPD98401_AAL5_CHAN) >>
- uPD98401_AAL5_CHAN_SHIFT;
- if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
- int pos;
- vcc = zatm_dev->rx_map[chan];
- pos = ZATM_VCC(vcc)->pool;
- if (skb == zatm_dev->last_free[pos])
- zatm_dev->last_free[pos] = NULL;
- skb_unlink(skb, zatm_dev->pool + pos);
- }
- else {
- printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
- "for non-existing channel\n",dev->number);
- size = 0;
- vcc = NULL;
- event_dump();
- }
- if (error) {
- static unsigned long silence = 0;
- static int last_error = 0;
-
- if (error != last_error ||
- time_after(jiffies, silence) || silence == 0){
- printk(KERN_WARNING DEV_LABEL "(itf %d): "
- "chan %d error %s\n",dev->number,chan,
- err_txt[error]);
- last_error = error;
- silence = (jiffies+2*HZ)|1;
- }
- size = 0;
- }
- if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
- size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
- printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
- "cells\n",dev->number,size,cells);
- size = 0;
- event_dump();
- }
- if (size > ATM_MAX_AAL5_PDU) {
- printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
- "(%d)\n",dev->number,size);
- size = 0;
- event_dump();
- }
- if (!size) {
- dev_kfree_skb_irq(skb);
- if (vcc) atomic_inc(&vcc->stats->rx_err);
- continue;
- }
- if (!atm_charge(vcc,skb->truesize)) {
- dev_kfree_skb_irq(skb);
- continue;
- }
- skb->len = size;
- ATM_SKB(skb)->vcc = vcc;
- vcc->push(vcc,skb);
- atomic_inc(&vcc->stats->rx);
- }
- zout(pos & 0xffff,MTA(mbx));
-#if 0 /* probably a stupid idea */
- refill_pool(dev,zatm_vcc->pool);
- /* maybe this saves us a few interrupts */
-#endif
-}
-
-
-static int open_rx_first(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- unsigned short chan;
- int cells;
-
- DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053));
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- zatm_vcc->rx_chan = 0;
- if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
- if (vcc->qos.aal == ATM_AAL5) {
- if (vcc->qos.rxtp.max_sdu > 65464)
- vcc->qos.rxtp.max_sdu = 65464;
- /* fix this - we may want to receive 64kB SDUs
- later */
- cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
- ATM_CELL_PAYLOAD);
- zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
- }
- else {
- cells = 1;
- zatm_vcc->pool = ZATM_AAL0_POOL;
- }
- if (zatm_vcc->pool < 0) return -EMSGSIZE;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_OPEN_CHAN,CMR);
- zwait();
- DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
- chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- DPRINTK("chan is %d\n",chan);
- if (!chan) return -EAGAIN;
- use_pool(vcc->dev,zatm_vcc->pool);
- DPRINTK("pool %d\n",zatm_vcc->pool);
- /* set up VC descriptor */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT,
- chan*VC_SIZE/4);
- zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ?
- uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1);
- zpokel(zatm_dev,0,chan*VC_SIZE/4+2);
- zatm_vcc->rx_chan = chan;
- zatm_dev->rx_map[chan] = vcc;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
-}
-
-
-static int open_rx_second(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int pos,shift;
-
- DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053));
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- if (!zatm_vcc->rx_chan) return 0;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- /* should also handle VPI @@@ */
- pos = vcc->vci >> 1;
- shift = (1-(vcc->vci & 1)) << 4;
- zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) |
- ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
-}
-
-
-static void close_rx(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int pos,shift;
-
- zatm_vcc = ZATM_VCC(vcc);
- zatm_dev = ZATM_DEV(vcc->dev);
- if (!zatm_vcc->rx_chan) return;
- DPRINTK("close_rx\n");
- /* disable receiver */
- if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) {
- spin_lock_irqsave(&zatm_dev->lock, flags);
- pos = vcc->vci >> 1;
- shift = (1-(vcc->vci & 1)) << 4;
- zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
- zwait();
- zout(uPD98401_NOP,CMR);
- zwait();
- zout(uPD98401_NOP,CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- udelay(10); /* why oh why ... ? */
- zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- if (!(zin(CMR) & uPD98401_CHAN_ADDR))
- printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
- "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL;
- zatm_vcc->rx_chan = 0;
- unuse_pool(vcc->dev,zatm_vcc->pool);
-}
-
-
-static int start_rx(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- int i;
-
- DPRINTK("start_rx\n");
- zatm_dev = ZATM_DEV(dev);
- zatm_dev->rx_map = kcalloc(zatm_dev->chans,
- sizeof(*zatm_dev->rx_map),
- GFP_KERNEL);
- if (!zatm_dev->rx_map) return -ENOMEM;
- /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
- zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
- /* prepare free buffer pools */
- for (i = 0; i <= ZATM_LAST_POOL; i++) {
- zatm_dev->pool_info[i].ref_count = 0;
- zatm_dev->pool_info[i].rqa_count = 0;
- zatm_dev->pool_info[i].rqu_count = 0;
- zatm_dev->pool_info[i].low_water = LOW_MARK;
- zatm_dev->pool_info[i].high_water = HIGH_MARK;
- zatm_dev->pool_info[i].offset = 0;
- zatm_dev->pool_info[i].next_off = 0;
- zatm_dev->pool_info[i].next_cnt = 0;
- zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES;
- }
- return 0;
-}
-
-
-/*----------------------------------- TX ------------------------------------*/
-
-
-static int do_tx(struct sk_buff *skb)
-{
- struct atm_vcc *vcc;
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- u32 *dsc;
- unsigned long flags;
-
- EVENT("do_tx\n",0,0);
- DPRINTK("sending skb %p\n",skb);
- vcc = ATM_SKB(skb)->vcc;
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- if (!skb_shinfo(skb)->nr_frags) {
- if (zatm_vcc->txing == RING_ENTRIES-1) {
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return RING_BUSY;
- }
- zatm_vcc->txing++;
- dsc = zatm_vcc->ring+zatm_vcc->ring_curr;
- zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) &
- (RING_ENTRIES*RING_WORDS-1);
- dsc[1] = 0;
- dsc[2] = skb->len;
- dsc[3] = virt_to_bus(skb->data);
- mb();
- dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM
- | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
- (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
- uPD98401_CLPM_1 : uPD98401_CLPM_0));
- EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0);
- }
- else {
-printk("NONONONOO!!!!\n");
- dsc = NULL;
-#if 0
- u32 *put;
- int i;
-
- dsc = kmalloc(uPD98401_TXPD_SIZE * 2 +
- uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
- if (!dsc) {
- if (vcc->pop)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_irq(skb);
- return -EAGAIN;
- }
- /* @@@ should check alignment */
- put = dsc+8;
- dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP |
- (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
- (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
- uPD98401_CLPM_1 : uPD98401_CLPM_0));
- dsc[1] = 0;
- dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
- dsc[3] = virt_to_bus(put);
- for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
- *put++ = ((struct iovec *) skb->data)[i].iov_len;
- *put++ = virt_to_bus(((struct iovec *)
- skb->data)[i].iov_base);
- }
- put[-2] |= uPD98401_TXBD_LAST;
-#endif
- }
- ZATM_PRV_DSC(skb) = dsc;
- skb_queue_tail(&zatm_vcc->tx_queue,skb);
- DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
- uPD98401_TXVC_QRP));
- zwait();
- zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- EVENT("done\n",0,0);
- return 0;
-}
-
-
-static inline void dequeue_tx(struct atm_vcc *vcc)
-{
- struct zatm_vcc *zatm_vcc;
- struct sk_buff *skb;
-
- EVENT("dequeue_tx\n",0,0);
- zatm_vcc = ZATM_VCC(vcc);
- skb = skb_dequeue(&zatm_vcc->tx_queue);
- if (!skb) {
- printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not "
- "txing\n",vcc->dev->number);
- return;
- }
-#if 0 /* @@@ would fail on CLP */
-if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
- uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n",
- *ZATM_PRV_DSC(skb));
-#endif
- *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */
- zatm_vcc->txing--;
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb_irq(skb);
- while ((skb = skb_dequeue(&zatm_vcc->backlog)))
- if (do_tx(skb) == RING_BUSY) {
- skb_queue_head(&zatm_vcc->backlog,skb);
- break;
- }
- atomic_inc(&vcc->stats->tx);
- wake_up(&zatm_vcc->tx_wait);
-}
-
-
-static void poll_tx(struct atm_dev *dev,int mbx)
-{
- struct zatm_dev *zatm_dev;
- unsigned long pos;
- u32 x;
-
- EVENT("poll_tx\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
- while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
- int chan;
-
-#if 1
- u32 data,*addr;
-
- EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
- addr = (u32 *) pos;
- data = *addr;
- chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT;
- EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr,
- data);
- EVENT("chan = %d\n",chan,0);
-#else
-NO !
- chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN)
- >> uPD98401_TXI_CONN_SHIFT;
-#endif
- if (chan < zatm_dev->chans && zatm_dev->tx_map[chan])
- dequeue_tx(zatm_dev->tx_map[chan]);
- else {
- printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication "
- "for non-existing channel %d\n",dev->number,chan);
- event_dump();
- }
- if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx])
- pos = zatm_dev->mbx_start[mbx];
- }
- zout(pos & 0xffff,MTA(mbx));
-}
-
-
-/*
- * BUG BUG BUG: Doesn't handle "new-style" rate specification yet.
- */
-
-static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
- unsigned long i,m,c;
- int shaper;
-
- DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max);
- zatm_dev = ZATM_DEV(dev);
- if (!zatm_dev->free_shapers) return -EAGAIN;
- for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++);
- zatm_dev->free_shapers &= ~1 << shaper;
- if (ubr) {
- c = 5;
- i = m = 1;
- zatm_dev->ubr_ref_cnt++;
- zatm_dev->ubr = shaper;
- *pcr = 0;
- }
- else {
- if (min) {
- if (min <= 255) {
- i = min;
- m = ATM_OC3_PCR;
- }
- else {
- i = 255;
- m = ATM_OC3_PCR*255/min;
- }
- }
- else {
- if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw;
- if (max <= 255) {
- i = max;
- m = ATM_OC3_PCR;
- }
- else {
- i = 255;
- m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
- }
- }
- if (i > m) {
- printk(KERN_CRIT DEV_LABEL "shaper algorithm botched "
- "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m);
- m = i;
- }
- *pcr = i*ATM_OC3_PCR/m;
- c = 20; /* @@@ should use max_cdv ! */
- if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL;
- if (zatm_dev->tx_bw < *pcr) return -EAGAIN;
- zatm_dev->tx_bw -= *pcr;
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr);
- zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper));
- zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper));
- zpokel(zatm_dev,0,uPD98401_X(shaper));
- zpokel(zatm_dev,0,uPD98401_Y(shaper));
- zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper));
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return shaper;
-}
-
-
-static void dealloc_shaper(struct atm_dev *dev,int shaper)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
-
- zatm_dev = ZATM_DEV(dev);
- if (shaper == zatm_dev->ubr) {
- if (--zatm_dev->ubr_ref_cnt) return;
- zatm_dev->ubr = -1;
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E,
- uPD98401_PS(shaper));
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->free_shapers |= 1 << shaper;
-}
-
-
-static void close_tx(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int chan;
-
- zatm_vcc = ZATM_VCC(vcc);
- zatm_dev = ZATM_DEV(vcc->dev);
- chan = zatm_vcc->tx_chan;
- if (!chan) return;
- DPRINTK("close_tx\n");
- if (skb_peek(&zatm_vcc->backlog)) {
- printk("waiting for backlog to drain ...\n");
- event_dump();
- wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));
- }
- if (skb_peek(&zatm_vcc->tx_queue)) {
- printk("waiting for TX queue to drain ...\n");
- event_dump();
- wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue));
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
-#if 0
- zwait();
- zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
-#endif
- zwait();
- zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- if (!(zin(CMR) & uPD98401_CHAN_ADDR))
- printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
- "%d\n",vcc->dev->number,chan);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_vcc->tx_chan = 0;
- zatm_dev->tx_map[chan] = NULL;
- if (zatm_vcc->shaper != zatm_dev->ubr) {
- zatm_dev->tx_bw += vcc->qos.txtp.min_pcr;
- dealloc_shaper(vcc->dev,zatm_vcc->shaper);
- }
- kfree(zatm_vcc->ring);
-}
-
-
-static int open_tx_first(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- u32 *loop;
- unsigned short chan;
- int unlimited;
-
- DPRINTK("open_tx_first\n");
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- zatm_vcc->tx_chan = 0;
- if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_OPEN_CHAN,CMR);
- zwait();
- DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
- chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- DPRINTK("chan is %d\n",chan);
- if (!chan) return -EAGAIN;
- unlimited = vcc->qos.txtp.traffic_class == ATM_UBR &&
- (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR ||
- vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
- if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
- else {
- int pcr;
-
- if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
- if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
- vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
- < 0) {
- close_tx(vcc);
- return zatm_vcc->shaper;
- }
- if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR;
- vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr;
- }
- zatm_vcc->tx_chan = chan;
- skb_queue_head_init(&zatm_vcc->tx_queue);
- init_waitqueue_head(&zatm_vcc->tx_wait);
- /* initialize ring */
- zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
- if (!zatm_vcc->ring) return -ENOMEM;
- loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
- loop[0] = uPD98401_TXPD_V;
- loop[1] = loop[2] = 0;
- loop[3] = virt_to_bus(zatm_vcc->ring);
- zatm_vcc->ring_curr = 0;
- zatm_vcc->txing = 0;
- skb_queue_head_init(&zatm_vcc->backlog);
- zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring),
- chan*VC_SIZE/4+uPD98401_TXVC_QRP);
- return 0;
-}
-
-
-static int open_tx_second(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
-
- DPRINTK("open_tx_second\n");
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- if (!zatm_vcc->tx_chan) return 0;
- /* set up VC descriptor */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4);
- zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper <<
- uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) |
- vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1);
- zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc;
- return 0;
-}
-
-
-static int start_tx(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- int i;
-
- DPRINTK("start_tx\n");
- zatm_dev = ZATM_DEV(dev);
- zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
- sizeof(*zatm_dev->tx_map),
- GFP_KERNEL);
- if (!zatm_dev->tx_map) return -ENOMEM;
- zatm_dev->tx_bw = ATM_OC3_PCR;
- zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
- zatm_dev->ubr = -1;
- zatm_dev->ubr_ref_cnt = 0;
- /* initialize shapers */
- for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i));
- return 0;
-}
-
-
-/*------------------------------- interrupts --------------------------------*/
-
-
-static irqreturn_t zatm_int(int irq,void *dev_id)
-{
- struct atm_dev *dev;
- struct zatm_dev *zatm_dev;
- u32 reason;
- int handled = 0;
-
- dev = dev_id;
- zatm_dev = ZATM_DEV(dev);
- while ((reason = zin(GSR))) {
- handled = 1;
- EVENT("reason 0x%x\n",reason,0);
- if (reason & uPD98401_INT_PI) {
- EVENT("PHY int\n",0,0);
- dev->phy->interrupt(dev);
- }
- if (reason & uPD98401_INT_RQA) {
- unsigned long pools;
- int i;
-
- pools = zin(RQA);
- EVENT("RQA (0x%08x)\n",pools,0);
- for (i = 0; pools; i++) {
- if (pools & 1) {
- refill_pool(dev,i);
- zatm_dev->pool_info[i].rqa_count++;
- }
- pools >>= 1;
- }
- }
- if (reason & uPD98401_INT_RQU) {
- unsigned long pools;
- int i;
- pools = zin(RQU);
- printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n",
- dev->number,pools);
- event_dump();
- for (i = 0; pools; i++) {
- if (pools & 1) {
- refill_pool(dev,i);
- zatm_dev->pool_info[i].rqu_count++;
- }
- pools >>= 1;
- }
- }
- /* don't handle RD */
- if (reason & uPD98401_INT_SPE)
- printk(KERN_ALERT DEV_LABEL "(itf %d): system parity "
- "error at 0x%08x\n",dev->number,zin(ADDR));
- if (reason & uPD98401_INT_CPE)
- printk(KERN_ALERT DEV_LABEL "(itf %d): control memory "
- "parity error at 0x%08x\n",dev->number,zin(ADDR));
- if (reason & uPD98401_INT_SBE) {
- printk(KERN_ALERT DEV_LABEL "(itf %d): system bus "
- "error at 0x%08x\n",dev->number,zin(ADDR));
- event_dump();
- }
- /* don't handle IND */
- if (reason & uPD98401_INT_MF) {
- printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full "
- "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF)
- >> uPD98401_INT_MF_SHIFT);
- event_dump();
- /* @@@ should try to recover */
- }
- if (reason & uPD98401_INT_MM) {
- if (reason & 1) poll_rx(dev,0);
- if (reason & 2) poll_rx(dev,1);
- if (reason & 4) poll_tx(dev,2);
- if (reason & 8) poll_tx(dev,3);
- }
- /* @@@ handle RCRn */
- }
- return IRQ_RETVAL(handled);
-}
-
-
-/*----------------------------- (E)EPROM access -----------------------------*/
-
-
-static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
- unsigned short cmd)
-{
- int error;
-
- if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value)))
- printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n",
- error);
-}
-
-
-static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
-{
- unsigned int value;
- int error;
-
- if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value)))
- printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n",
- error);
- return value;
-}
-
-
-static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
- int bits, unsigned short cmd)
-{
- unsigned long value;
- int i;
-
- for (i = bits-1; i >= 0; i--) {
- value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0);
- eprom_set(zatm_dev,value,cmd);
- eprom_set(zatm_dev,value | ZEPROM_SK,cmd);
- eprom_set(zatm_dev,value,cmd);
- }
-}
-
-
-static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
- unsigned short cmd)
-{
- int i;
-
- *byte = 0;
- for (i = 8; i; i--) {
- eprom_set(zatm_dev,ZEPROM_CS,cmd);
- eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd);
- *byte <<= 1;
- if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1;
- eprom_set(zatm_dev,ZEPROM_CS,cmd);
- }
-}
-
-
-static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
- int swap)
-{
- unsigned char buf[ZEPROM_SIZE];
- struct zatm_dev *zatm_dev;
- int i;
-
- zatm_dev = ZATM_DEV(dev);
- for (i = 0; i < ZEPROM_SIZE; i += 2) {
- eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */
- eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd);
- eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd);
- eprom_get_byte(zatm_dev,buf+i+swap,cmd);
- eprom_get_byte(zatm_dev,buf+i+1-swap,cmd);
- eprom_set(zatm_dev,0,cmd); /* deselect EPROM */
- }
- memcpy(dev->esi,buf+offset,ESI_LEN);
- return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */
-}
-
-
-static void eprom_get_esi(struct atm_dev *dev)
-{
- if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
- (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
-}
-
-
-/*--------------------------------- entries ---------------------------------*/
-
-
-static int zatm_init(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- struct pci_dev *pci_dev;
- unsigned short command;
- int error,i,last;
- unsigned long t0,t1,t2;
-
- DPRINTK(">zatm_init\n");
- zatm_dev = ZATM_DEV(dev);
- spin_lock_init(&zatm_dev->lock);
- pci_dev = zatm_dev->pci_dev;
- zatm_dev->base = pci_resource_start(pci_dev, 0);
- zatm_dev->irq = pci_dev->irq;
- if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
- printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
- dev->number,error);
- return -EINVAL;
- }
- if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
- command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) {
- printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)"
- "\n",dev->number,error);
- return -EIO;
- }
- eprom_get_esi(dev);
- printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
- dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
- /* reset uPD98401 */
- zout(0,SWR);
- while (!(zin(GSR) & uPD98401_INT_IND));
- zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR);
- last = MAX_CRAM_SIZE;
- for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) {
- zpokel(zatm_dev,0x55555555,i);
- if (zpeekl(zatm_dev,i) != 0x55555555) last = i;
- else {
- zpokel(zatm_dev,0xAAAAAAAA,i);
- if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i;
- else zpokel(zatm_dev,i,i);
- }
- }
- for (i = 0; i < last; i += RAM_INCREMENT)
- if (zpeekl(zatm_dev,i) != i) break;
- zatm_dev->mem = i << 2;
- while (i) zpokel(zatm_dev,0,--i);
- /* reset again to rebuild memory pointers */
- zout(0,SWR);
- while (!(zin(GSR) & uPD98401_INT_IND));
- zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 |
- uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR);
- /* TODO: should shrink allocation now */
- printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" :
- "MMF");
- for (i = 0; i < ESI_LEN; i++)
- printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? ")\n" : "-");
- do {
- unsigned long flags;
-
- spin_lock_irqsave(&zatm_dev->lock, flags);
- t0 = zpeekl(zatm_dev,uPD98401_TSR);
- udelay(10);
- t1 = zpeekl(zatm_dev,uPD98401_TSR);
- udelay(1010);
- t2 = zpeekl(zatm_dev,uPD98401_TSR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- }
- while (t0 > t1 || t1 > t2); /* loop if wrapping ... */
- zatm_dev->khz = t2-2*t1+t0;
- printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d "
- "MHz\n",dev->number,
- (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT,
- zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000);
- return uPD98402_init(dev);
-}
-
-
-static int zatm_start(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev = ZATM_DEV(dev);
- struct pci_dev *pdev = zatm_dev->pci_dev;
- unsigned long curr;
- int pools,vccs,rx;
- int error, i, ld;
-
- DPRINTK("zatm_start\n");
- zatm_dev->rx_map = zatm_dev->tx_map = NULL;
- for (i = 0; i < NR_MBX; i++)
- zatm_dev->mbx_start[i] = 0;
- error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev);
- if (error < 0) {
- printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
- dev->number,zatm_dev->irq);
- goto done;
- }
- /* define memory regions */
- pools = NR_POOLS;
- if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
- pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
- vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
- (2*VC_SIZE+RX_SIZE);
- ld = -1;
- for (rx = 1; rx < vccs; rx <<= 1) ld++;
- dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */
- dev->ci_range.vci_bits = ld;
- dev->link_rate = ATM_OC3_PCR;
- zatm_dev->chans = vccs; /* ??? */
- curr = rx*RX_SIZE/4;
- DPRINTK("RX pool 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
- zatm_dev->pool_base = curr;
- curr += pools*POOL_SIZE/4;
- DPRINTK("Shapers 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
- curr += NR_SHAPERS*SHAPER_SIZE/4;
- DPRINTK("Free 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
- printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, "
- "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
- (zatm_dev->mem-curr*4)/VC_SIZE);
- /* create mailboxes */
- for (i = 0; i < NR_MBX; i++) {
- void *mbx;
- dma_addr_t mbx_dma;
-
- if (!mbx_entries[i])
- continue;
- mbx = dma_alloc_coherent(&pdev->dev,
- 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
- if (!mbx) {
- error = -ENOMEM;
- goto out;
- }
- /*
- * Alignment provided by dma_alloc_coherent() isn't enough
- * for this device.
- */
- if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
- printk(KERN_ERR DEV_LABEL "(itf %d): system "
- "bus incompatible with driver\n", dev->number);
- dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
- error = -ENODEV;
- goto out;
- }
- DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
- zatm_dev->mbx_start[i] = (unsigned long)mbx;
- zatm_dev->mbx_dma[i] = mbx_dma;
- zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
- 0xffff;
- zout(mbx_dma >> 16, MSH(i));
- zout(mbx_dma, MSL(i));
- zout(zatm_dev->mbx_end[i], MBA(i));
- zout((unsigned long)mbx & 0xffff, MTA(i));
- zout((unsigned long)mbx & 0xffff, MWA(i));
- }
- error = start_tx(dev);
- if (error)
- goto out;
- error = start_rx(dev);
- if (error)
- goto out_tx;
- error = dev->phy->start(dev);
- if (error)
- goto out_rx;
- zout(0xffffffff,IMR); /* enable interrupts */
- /* enable TX & RX */
- zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
-done:
- return error;
-
-out_rx:
- kfree(zatm_dev->rx_map);
-out_tx:
- kfree(zatm_dev->tx_map);
-out:
- while (i-- > 0) {
- dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
- (void *)zatm_dev->mbx_start[i],
- zatm_dev->mbx_dma[i]);
- }
- free_irq(zatm_dev->irq, dev);
- goto done;
-}
-
-
-static void zatm_close(struct atm_vcc *vcc)
-{
- DPRINTK(">zatm_close\n");
- if (!ZATM_VCC(vcc)) return;
- clear_bit(ATM_VF_READY,&vcc->flags);
- close_rx(vcc);
- EVENT("close_tx\n",0,0);
- close_tx(vcc);
- DPRINTK("zatm_close: done waiting\n");
- /* deallocate memory */
- kfree(ZATM_VCC(vcc));
- vcc->dev_data = NULL;
- clear_bit(ATM_VF_ADDR,&vcc->flags);
-}
-
-
-static int zatm_open(struct atm_vcc *vcc)
-{
- struct zatm_vcc *zatm_vcc;
- short vpi = vcc->vpi;
- int vci = vcc->vci;
- int error;
-
- DPRINTK(">zatm_open\n");
- if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
- vcc->dev_data = NULL;
- if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
- set_bit(ATM_VF_ADDR,&vcc->flags);
- if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */
- DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
- vcc->vci);
- if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
- zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
- if (!zatm_vcc) {
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -ENOMEM;
- }
- vcc->dev_data = zatm_vcc;
- ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */
- if ((error = open_rx_first(vcc))) {
- zatm_close(vcc);
- return error;
- }
- if ((error = open_tx_first(vcc))) {
- zatm_close(vcc);
- return error;
- }
- }
- if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0;
- if ((error = open_rx_second(vcc))) {
- zatm_close(vcc);
- return error;
- }
- if ((error = open_tx_second(vcc))) {
- zatm_close(vcc);
- return error;
- }
- set_bit(ATM_VF_READY,&vcc->flags);
- return 0;
-}
-
-
-static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
-{
- printk("Not yet implemented\n");
- return -ENOSYS;
- /* @@@ */
-}
-
-
-static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
-
- zatm_dev = ZATM_DEV(dev);
- switch (cmd) {
- case ZATM_GETPOOLZ:
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- fallthrough;
- case ZATM_GETPOOL:
- {
- struct zatm_pool_info info;
- int pool;
-
- if (get_user(pool,
- &((struct zatm_pool_req __user *) arg)->pool_num))
- return -EFAULT;
- if (pool < 0 || pool > ZATM_LAST_POOL)
- return -EINVAL;
- pool = array_index_nospec(pool,
- ZATM_LAST_POOL + 1);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- info = zatm_dev->pool_info[pool];
- if (cmd == ZATM_GETPOOLZ) {
- zatm_dev->pool_info[pool].rqa_count = 0;
- zatm_dev->pool_info[pool].rqu_count = 0;
- }
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return copy_to_user(
- &((struct zatm_pool_req __user *) arg)->info,
- &info,sizeof(info)) ? -EFAULT : 0;
- }
- case ZATM_SETPOOL:
- {
- struct zatm_pool_info info;
- int pool;
-
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (get_user(pool,
- &((struct zatm_pool_req __user *) arg)->pool_num))
- return -EFAULT;
- if (pool < 0 || pool > ZATM_LAST_POOL)
- return -EINVAL;
- pool = array_index_nospec(pool,
- ZATM_LAST_POOL + 1);
- if (copy_from_user(&info,
- &((struct zatm_pool_req __user *) arg)->info,
- sizeof(info))) return -EFAULT;
- if (!info.low_water)
- info.low_water = zatm_dev->
- pool_info[pool].low_water;
- if (!info.high_water)
- info.high_water = zatm_dev->
- pool_info[pool].high_water;
- if (!info.next_thres)
- info.next_thres = zatm_dev->
- pool_info[pool].next_thres;
- if (info.low_water >= info.high_water ||
- info.low_water < 0)
- return -EINVAL;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zatm_dev->pool_info[pool].low_water =
- info.low_water;
- zatm_dev->pool_info[pool].high_water =
- info.high_water;
- zatm_dev->pool_info[pool].next_thres =
- info.next_thres;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
- }
- default:
- if (!dev->phy->ioctl) return -ENOIOCTLCMD;
- return dev->phy->ioctl(dev,cmd,arg);
- }
-}
-
-static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
-{
- int error;
-
- EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
- if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) {
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb(skb);
- return -EINVAL;
- }
- if (!skb) {
- printk(KERN_CRIT "!skb in zatm_send ?\n");
- if (vcc->pop) vcc->pop(vcc,skb);
- return -EINVAL;
- }
- ATM_SKB(skb)->vcc = vcc;
- error = do_tx(skb);
- if (error != RING_BUSY) return error;
- skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
- return 0;
-}
-
-
-static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
- unsigned long addr)
-{
- struct zatm_dev *zatm_dev;
-
- zatm_dev = ZATM_DEV(dev);
- zwait();
- zout(value,CER);
- zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
- (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-}
-
-
-static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
-{
- struct zatm_dev *zatm_dev;
-
- zatm_dev = ZATM_DEV(dev);
- zwait();
- zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
- (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait();
- return zin(CER) & 0xff;
-}
-
-
-static const struct atmdev_ops ops = {
- .open = zatm_open,
- .close = zatm_close,
- .ioctl = zatm_ioctl,
- .send = zatm_send,
- .phy_put = zatm_phy_put,
- .phy_get = zatm_phy_get,
- .change_qos = zatm_change_qos,
-};
-
-static int zatm_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
-{
- struct atm_dev *dev;
- struct zatm_dev *zatm_dev;
- int ret = -ENOMEM;
-
- zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
- if (!zatm_dev) {
- printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
- goto out;
- }
-
- dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
- if (!dev)
- goto out_free;
-
- ret = pci_enable_device(pci_dev);
- if (ret < 0)
- goto out_deregister;
-
- ret = pci_request_regions(pci_dev, DEV_LABEL);
- if (ret < 0)
- goto out_disable;
-
- ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
- if (ret < 0)
- goto out_release;
-
- zatm_dev->pci_dev = pci_dev;
- dev->dev_data = zatm_dev;
- zatm_dev->copper = (int)ent->driver_data;
- if ((ret = zatm_init(dev)) || (ret = zatm_start(dev)))
- goto out_release;
-
- pci_set_drvdata(pci_dev, dev);
- zatm_dev->more = zatm_boards;
- zatm_boards = dev;
- ret = 0;
-out:
- return ret;
-
-out_release:
- pci_release_regions(pci_dev);
-out_disable:
- pci_disable_device(pci_dev);
-out_deregister:
- atm_dev_deregister(dev);
-out_free:
- kfree(zatm_dev);
- goto out;
-}
-
-
-MODULE_LICENSE("GPL");
-
-static const struct pci_device_id zatm_pci_tbl[] = {
- { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
- { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, zatm_pci_tbl);
-
-static struct pci_driver zatm_driver = {
- .name = DEV_LABEL,
- .id_table = zatm_pci_tbl,
- .probe = zatm_init_one,
-};
-
-static int __init zatm_init_module(void)
-{
- return pci_register_driver(&zatm_driver);
-}
-
-module_init(zatm_init_module);
-/* module_exit not defined so not unloadable */
diff --git a/drivers/atm/zatm.h b/drivers/atm/zatm.h
deleted file mode 100644
index 8204369fe825..000000000000
--- a/drivers/atm/zatm.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/zatm.h - ZeitNet ZN122x device driver declarations */
-
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#ifndef DRIVER_ATM_ZATM_H
-#define DRIVER_ATM_ZATM_H
-
-#include <linux/skbuff.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/pci.h>
-
-
-#define DEV_LABEL "zatm"
-
-#define MAX_AAL5_PDU 10240 /* allocate for AAL5 PDUs of this size */
-#define MAX_RX_SIZE_LD 14 /* ceil(log2((MAX_AAL5_PDU+47)/48)) */
-
-#define LOW_MARK 12 /* start adding new buffers if less than 12 */
-#define HIGH_MARK 30 /* stop adding buffers after reaching 30 */
-#define OFF_CNG_THRES 5 /* threshold for offset changes */
-
-#define RX_SIZE 2 /* RX lookup entry size (in bytes) */
-#define NR_POOLS 32 /* number of free buffer pointers */
-#define POOL_SIZE 8 /* buffer entry size (in bytes) */
-#define NR_SHAPERS 16 /* number of shapers */
-#define SHAPER_SIZE 4 /* shaper entry size (in bytes) */
-#define VC_SIZE 32 /* VC dsc (TX or RX) size (in bytes) */
-
-#define RING_ENTRIES 32 /* ring entries (without back pointer) */
-#define RING_WORDS 4 /* ring element size */
-#define RING_SIZE (sizeof(unsigned long)*(RING_ENTRIES+1)*RING_WORDS)
-
-#define NR_MBX 4 /* four mailboxes */
-#define MBX_RX_0 0 /* mailbox indices */
-#define MBX_RX_1 1
-#define MBX_TX_0 2
-#define MBX_TX_1 3
-
-struct zatm_vcc {
- /*-------------------------------- RX part */
- int rx_chan; /* RX channel, 0 if none */
- int pool; /* free buffer pool */
- /*-------------------------------- TX part */
- int tx_chan; /* TX channel, 0 if none */
- int shaper; /* shaper, <0 if none */
- struct sk_buff_head tx_queue; /* list of buffers in transit */
- wait_queue_head_t tx_wait; /* for close */
- u32 *ring; /* transmit ring */
- int ring_curr; /* current write position */
- int txing; /* number of transmits in progress */
- struct sk_buff_head backlog; /* list of buffers waiting for ring */
-};
-
-struct zatm_dev {
- /*-------------------------------- TX part */
- int tx_bw; /* remaining bandwidth */
- u32 free_shapers; /* bit set */
- int ubr; /* UBR shaper; -1 if none */
- int ubr_ref_cnt; /* number of VCs using UBR shaper */
- /*-------------------------------- RX part */
- int pool_ref[NR_POOLS]; /* free buffer pool usage counters */
- volatile struct sk_buff *last_free[NR_POOLS];
- /* last entry in respective pool */
- struct sk_buff_head pool[NR_POOLS];/* free buffer pools */
- struct zatm_pool_info pool_info[NR_POOLS]; /* pool information */
- /*-------------------------------- maps */
- struct atm_vcc **tx_map; /* TX VCCs */
- struct atm_vcc **rx_map; /* RX VCCs */
- int chans; /* map size, must be 2^n */
- /*-------------------------------- mailboxes */
- unsigned long mbx_start[NR_MBX];/* start addresses */
- dma_addr_t mbx_dma[NR_MBX];
- u16 mbx_end[NR_MBX]; /* end offset (in bytes) */
- /*-------------------------------- other pointers */
- u32 pool_base; /* Free buffer pool dsc (word addr) */
- /*-------------------------------- ZATM links */
- struct atm_dev *more; /* other ZATM devices */
- /*-------------------------------- general information */
- int mem; /* RAM on board (in bytes) */
- int khz; /* timer clock */
- int copper; /* PHY type */
- unsigned char irq; /* IRQ */
- unsigned int base; /* IO base address */
- struct pci_dev *pci_dev; /* PCI stuff */
- spinlock_t lock;
-};
-
-
-#define ZATM_DEV(d) ((struct zatm_dev *) (d)->dev_data)
-#define ZATM_VCC(d) ((struct zatm_vcc *) (d)->dev_data)
-
-
-struct zatm_skb_prv {
- struct atm_skb_data _; /* reserved */
- u32 *dsc; /* pointer to skb's descriptor */
-};
-
-#define ZATM_PRV_DSC(skb) (((struct zatm_skb_prv *) (skb)->cb)->dsc)
-
-#endif
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a311df07b1bd..4deb60a3b43f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2613,7 +2613,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_counters);
SET_DEVICE_OP(dev_ops, create_cq);
SET_DEVICE_OP(dev_ops, create_flow);
- SET_DEVICE_OP(dev_ops, create_flow_action_esp);
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
@@ -2676,7 +2675,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
- SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
SET_DEVICE_OP(dev_ops, modify_hw_stat);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index d42ed7ff223e..0ddcf6da66c4 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -46,385 +46,6 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
return action->device->ops.destroy_flow_action(action);
}
-static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
- u32 flags, bool is_modify)
-{
- u64 verbs_flags = flags;
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN))
- verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED;
-
- if (is_modify && uverbs_attr_is_valid(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS))
- verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS;
-
- return verbs_flags;
-};
-
-static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat)
-{
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm =
- &keymat->keymat.aes_gcm;
-
- if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
- return -EOPNOTSUPP;
-
- if (aes_gcm->key_len != 32 &&
- aes_gcm->key_len != 24 &&
- aes_gcm->key_len != 16)
- return -EINVAL;
-
- if (aes_gcm->icv_len != 16 &&
- aes_gcm->icv_len != 8 &&
- aes_gcm->icv_len != 12)
- return -EINVAL;
-
- return 0;
-}
-
-static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = {
- [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm,
-};
-
-static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify)
-{
- /* This is used in order to modify an esp flow action with an enabled
- * replay protection to a disabled one. This is only supported via
- * modify, as in create verb we can simply drop the REPLAY attribute and
- * achieve the same thing.
- */
- return is_modify ? 0 : -EINVAL;
-}
-
-static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify)
-{
- /* Some replay protections could always be enabled without validating
- * anything.
- */
- return 0;
-}
-
-static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify) = {
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none,
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok,
-};
-
-static int parse_esp_ip(enum ib_flow_spec_type proto,
- const void __user *val_ptr,
- size_t len, union ib_flow_spec *out)
-{
- int ret;
- const struct ib_uverbs_flow_ipv4_filter ipv4 = {
- .src_ip = cpu_to_be32(0xffffffffUL),
- .dst_ip = cpu_to_be32(0xffffffffUL),
- .proto = 0xff,
- .tos = 0xff,
- .ttl = 0xff,
- .flags = 0xff,
- };
- const struct ib_uverbs_flow_ipv6_filter ipv6 = {
- .src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .flow_label = cpu_to_be32(0xffffffffUL),
- .next_hdr = 0xff,
- .traffic_class = 0xff,
- .hop_limit = 0xff,
- };
- union {
- struct ib_uverbs_flow_ipv4_filter ipv4;
- struct ib_uverbs_flow_ipv6_filter ipv6;
- } user_val = {};
- const void *user_pmask;
- size_t val_len;
-
- /* If the flow IPv4/IPv6 flow specifications are extended, the mask
- * should be changed as well.
- */
- BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) +
- sizeof(ipv4.flags) != sizeof(ipv4));
- BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) +
- sizeof(ipv6.reserved) != sizeof(ipv6));
-
- switch (proto) {
- case IB_FLOW_SPEC_IPV4:
- if (len > sizeof(user_val.ipv4) &&
- !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4),
- len - sizeof(user_val.ipv4)))
- return -EOPNOTSUPP;
-
- val_len = min_t(size_t, len, sizeof(user_val.ipv4));
- ret = copy_from_user(&user_val.ipv4, val_ptr,
- val_len);
- if (ret)
- return -EFAULT;
-
- user_pmask = &ipv4;
- break;
- case IB_FLOW_SPEC_IPV6:
- if (len > sizeof(user_val.ipv6) &&
- !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6),
- len - sizeof(user_val.ipv6)))
- return -EOPNOTSUPP;
-
- val_len = min_t(size_t, len, sizeof(user_val.ipv6));
- ret = copy_from_user(&user_val.ipv6, val_ptr,
- val_len);
- if (ret)
- return -EFAULT;
-
- user_pmask = &ipv6;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask,
- &user_val,
- val_len, out);
-}
-
-static int flow_action_esp_get_encap(struct ib_flow_spec_list *out,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uverbs_flow_action_esp_encap uverbs_encap;
- int ret;
-
- ret = uverbs_copy_from(&uverbs_encap, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP);
- if (ret)
- return ret;
-
- /* We currently support only one encap */
- if (uverbs_encap.next_ptr)
- return -EOPNOTSUPP;
-
- if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 &&
- uverbs_encap.type != IB_FLOW_SPEC_IPV6)
- return -EOPNOTSUPP;
-
- return parse_esp_ip(uverbs_encap.type,
- u64_to_user_ptr(uverbs_encap.val_ptr),
- uverbs_encap.len,
- &out->spec);
-}
-
-struct ib_flow_action_esp_attr {
- struct ib_flow_action_attrs_esp hdr;
- struct ib_flow_action_attrs_esp_keymats keymat;
- struct ib_flow_action_attrs_esp_replays replay;
- /* We currently support only one spec */
- struct ib_flow_spec_list encap;
-};
-
-#define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW
-static int parse_flow_action_esp(struct ib_device *ib_dev,
- struct uverbs_attr_bundle *attrs,
- struct ib_flow_action_esp_attr *esp_attr,
- bool is_modify)
-{
- struct ib_uverbs_flow_action_esp uverbs_esp = {};
- int ret;
-
- /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
- ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ESN);
- if (IS_UVERBS_COPY_ERR(ret))
- return ret;
-
- /* This can be called from FLOW_ACTION_ESP_MODIFY where
- * UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional
- */
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) {
- ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS);
- if (ret)
- return ret;
-
- if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1))
- return -EOPNOTSUPP;
-
- esp_attr->hdr.spi = uverbs_esp.spi;
- esp_attr->hdr.seq = uverbs_esp.seq;
- esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad;
- esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts;
- }
- esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags,
- is_modify);
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) {
- esp_attr->keymat.protocol =
- uverbs_attr_get_enum_id(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
- ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat,
- attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
- if (ret)
- return ret;
-
- ret = flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat);
- if (ret)
- return ret;
-
- esp_attr->hdr.keymat = &esp_attr->keymat;
- }
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) {
- esp_attr->replay.protocol =
- uverbs_attr_get_enum_id(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
-
- ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay,
- attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
- if (ret)
- return ret;
-
- ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay,
- is_modify);
- if (ret)
- return ret;
-
- esp_attr->hdr.replay = &esp_attr->replay;
- }
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) {
- ret = flow_action_esp_get_encap(&esp_attr->encap, attrs);
- if (ret)
- return ret;
-
- esp_attr->hdr.encap = &esp_attr->encap;
- }
-
- return 0;
-}
-
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj = uverbs_attr_get_uobject(
- attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
- struct ib_device *ib_dev = attrs->context->device;
- int ret;
- struct ib_flow_action *action;
- struct ib_flow_action_esp_attr esp_attr = {};
-
- if (!ib_dev->ops.create_flow_action_esp)
- return -EOPNOTSUPP;
-
- ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
- if (ret)
- return ret;
-
- /* No need to check as this attribute is marked as MANDATORY */
- action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
- attrs);
- if (IS_ERR(action))
- return PTR_ERR(action);
-
- uverbs_flow_action_fill_action(action, uobj, ib_dev,
- IB_FLOW_ACTION_ESP);
-
- return 0;
-}
-
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj = uverbs_attr_get_uobject(
- attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE);
- struct ib_flow_action *action = uobj->object;
- int ret;
- struct ib_flow_action_esp_attr esp_attr = {};
-
- if (!action->device->ops.modify_flow_action_esp)
- return -EOPNOTSUPP;
-
- ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
- if (ret)
- return ret;
-
- if (action->type != IB_FLOW_ACTION_ESP)
- return -EINVAL;
-
- return action->device->ops.modify_flow_action_esp(action,
- &esp_attr.hdr,
- attrs);
-}
-
-static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
- [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm,
- aes_key),
- },
-};
-
-static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_NO_DATA(),
- },
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp,
- size),
- },
-};
-
-DECLARE_UVERBS_NAMED_METHOD(
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_NEW,
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
- hard_limit_pkts),
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
- UVERBS_ATTR_TYPE(__u32),
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_MANDATORY),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay,
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
- UA_OPTIONAL));
-
-DECLARE_UVERBS_NAMED_METHOD(
- UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
- UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_WRITE,
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
- hard_limit_pkts),
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
- UVERBS_ATTR_TYPE(__u32),
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay,
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
- UA_OPTIONAL));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_FLOW_ACTION_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
@@ -435,9 +56,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY));
const struct uapi_definition uverbs_def_obj_flow_action[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 661ed2b44508..9c2886bc72cb 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -15,7 +15,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
-#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
#include <net/inet_ecn.h>
#include "mlx5_ib.h"
@@ -148,16 +147,6 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
{
switch (maction->ib_action.type) {
- case IB_FLOW_ACTION_ESP:
- if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
- return -EINVAL;
- /* Currently only AES_GCM keymat is supported by the driver */
- action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
- action->action |= is_egress ?
- MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
- return 0;
case IB_FLOW_ACTION_UNSPECIFIED:
if (maction->flow_action_raw.sub_type ==
MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
@@ -368,14 +357,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev,
ib_spec->type & IB_FLOW_SPEC_INNER);
break;
case IB_FLOW_SPEC_ESP:
- if (ib_spec->esp.mask.seq)
- return -EOPNOTSUPP;
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
- ntohl(ib_spec->esp.mask.spi));
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
- ntohl(ib_spec->esp.val.spi));
- break;
+ return -EOPNOTSUPP;
case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD))
@@ -587,47 +569,6 @@ static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
return false;
}
-enum valid_spec {
- VALID_SPEC_INVALID,
- VALID_SPEC_VALID,
- VALID_SPEC_NA,
-};
-
-static enum valid_spec
-is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
- const struct mlx5_flow_spec *spec,
- const struct mlx5_flow_act *flow_act,
- bool egress)
-{
- const u32 *match_c = spec->match_criteria;
- bool is_crypto =
- (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
- bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
- bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
-
- /*
- * Currently only crypto is supported in egress, when regular egress
- * rules would be supported, always return VALID_SPEC_NA.
- */
- if (!is_crypto)
- return VALID_SPEC_NA;
-
- return is_crypto && is_ipsec &&
- (!egress || (!is_drop &&
- !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
- VALID_SPEC_VALID : VALID_SPEC_INVALID;
-}
-
-static bool is_valid_spec(struct mlx5_core_dev *mdev,
- const struct mlx5_flow_spec *spec,
- const struct mlx5_flow_act *flow_act,
- bool egress)
-{
- /* We curretly only support ipsec egress flow */
- return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
-}
-
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
const struct ib_flow_attr *flow_attr,
bool check_inner)
@@ -1154,8 +1095,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
- if (is_egress &&
- !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
+ if (is_egress) {
err = -EINVAL;
goto free;
}
@@ -1740,149 +1680,6 @@ unlock:
return ERR_PTR(err);
}
-static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
-{
- u32 flags = 0;
-
- if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
- flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
-
- return flags;
-}
-
-#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \
- MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
-static struct ib_flow_action *
-mlx5_ib_create_flow_action_esp(struct ib_device *device,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_dev *mdev = to_mdev(device);
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
- struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
- struct mlx5_ib_flow_action *action;
- u64 action_flags;
- u64 flags;
- int err = 0;
-
- err = uverbs_get_flags64(
- &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
- if (err)
- return ERR_PTR(err);
-
- flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
-
- /* We current only support a subset of the standard features. Only a
- * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
- * (with overlap). Full offload mode isn't supported.
- */
- if (!attr->keymat || attr->replay || attr->encap ||
- attr->spi || attr->seq || attr->tfc_pad ||
- attr->hard_limit_pkts ||
- (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
- return ERR_PTR(-EOPNOTSUPP);
-
- if (attr->keymat->protocol !=
- IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
- return ERR_PTR(-EOPNOTSUPP);
-
- aes_gcm = &attr->keymat->keymat.aes_gcm;
-
- if (aes_gcm->icv_len != 16 ||
- aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
- return ERR_PTR(-EOPNOTSUPP);
-
- action = kmalloc(sizeof(*action), GFP_KERNEL);
- if (!action)
- return ERR_PTR(-ENOMEM);
-
- action->esp_aes_gcm.ib_flags = attr->flags;
- memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
- sizeof(accel_attrs.keymat.aes_gcm.aes_key));
- accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
- memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
- sizeof(accel_attrs.keymat.aes_gcm.salt));
- memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
- sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
- accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
- accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
- accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
-
- accel_attrs.esn = attr->esn;
- if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
-
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
- accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
-
- action->esp_aes_gcm.ctx =
- mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
- if (IS_ERR(action->esp_aes_gcm.ctx)) {
- err = PTR_ERR(action->esp_aes_gcm.ctx);
- goto err_parse;
- }
-
- action->esp_aes_gcm.ib_flags = attr->flags;
-
- return &action->ib_action;
-
-err_parse:
- kfree(action);
- return ERR_PTR(err);
-}
-
-static int
-mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_flow_action *maction = to_mflow_act(action);
- struct mlx5_accel_esp_xfrm_attrs accel_attrs;
- int err = 0;
-
- if (attr->keymat || attr->replay || attr->encap ||
- attr->spi || attr->seq || attr->tfc_pad ||
- attr->hard_limit_pkts ||
- (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
- return -EOPNOTSUPP;
-
- /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
- * be modified.
- */
- if (!(maction->esp_aes_gcm.ib_flags &
- IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
- attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
- return -EINVAL;
-
- memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
- sizeof(accel_attrs));
-
- accel_attrs.esn = attr->esn;
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
- else
- accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
-
- err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
- &accel_attrs);
- if (err)
- return err;
-
- maction->esp_aes_gcm.ib_flags &=
- ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
- maction->esp_aes_gcm.ib_flags |=
- attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
-
- return 0;
-}
-
static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
switch (maction->flow_action_raw.sub_type) {
@@ -1906,13 +1703,6 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
struct mlx5_ib_flow_action *maction = to_mflow_act(action);
switch (action->type) {
- case IB_FLOW_ACTION_ESP:
- /*
- * We only support aes_gcm by now, so we implicitly know this is
- * the underline crypto.
- */
- mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
- break;
case IB_FLOW_ACTION_UNSPECIFIED:
destroy_flow_action_raw(maction);
break;
@@ -2709,11 +2499,6 @@ static const struct ib_device_ops flow_ops = {
.destroy_flow_action = mlx5_ib_destroy_flow_action,
};
-static const struct ib_device_ops flow_ipsec_ops = {
- .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
- .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
-};
-
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
@@ -2724,9 +2509,5 @@ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops);
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
- ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 32a0ea820573..61aa196d6484 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -41,7 +41,6 @@
#include "wr.h"
#include "restrack.h"
#include "counters.h"
-#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
@@ -906,10 +905,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_UDP |
MLX5_RX_HASH_DST_PORT_UDP |
MLX5_RX_HASH_INNER;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
- resp.rss_caps.rx_hash_fields_mask |=
- MLX5_RX_HASH_IPSEC_SPI;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -1791,23 +1786,6 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
MLX5_CAP_GEN(dev->mdev,
num_of_uars_per_page) : 1;
-
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE) {
- if (mlx5_get_flow_namespace(dev->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS))
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
- if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
- /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
- }
-
resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
resp->num_ports = dev->num_ports;
@@ -3605,13 +3583,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
- mlx5_ib_flow_action,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- enum mlx5_ib_uapi_flow_action_flags));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_query_context,
UVERBS_OBJECT_DEVICE,
UVERBS_METHOD_QUERY_CONTEXT,
@@ -3628,8 +3599,6 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
- UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
- &mlx5_ib_flow_action),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index a6606736d8c5..2776ca5fc33f 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -121,7 +121,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (sk->sk_state == MISDN_CLOSED)
return 0;
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
return err;
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 3eff08d7b8e5..fe17c7f98e81 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -216,8 +216,12 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
raw->bpf_sample = sample;
- if (raw->progs)
- BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, bpf_prog_run);
+ if (raw->progs) {
+ rcu_read_lock();
+ bpf_prog_run_array(rcu_dereference(raw->progs),
+ &raw->bpf_sample, bpf_prog_run);
+ rcu_read_unlock();
+ }
}
/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 38e152548126..c9e75a9de282 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5226,7 +5226,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index fff259247d52..ac760fd39282 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -170,6 +170,7 @@ config PCH_CAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/ctucanfd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 1e660afcb61b..0af85983634c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -16,6 +16,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig
new file mode 100644
index 000000000000..48963efc7f19
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Kconfig
@@ -0,0 +1,34 @@
+config CAN_CTUCANFD
+ tristate "CTU CAN-FD IP core"
+ help
+	  This driver adds support for the CTU CAN FD open-source IP core.
+	  More documentation and the core sources are available at the project
+	  page (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core).
+	  The integration of the core into a Xilinx Zynq system as a platform
+	  driver is available at https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top.
+	  An implementation on an Intel FPGA based PCI Express board is
+	  available from https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd and
+	  one on an Intel SoC from https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd.
+	  An overview of the CTU FEE CAN bus projects is at https://canbus.pages.fel.cvut.cz/ .
+
+config CAN_CTUCANFD_PCI
+ tristate "CTU CAN-FD IP core PCI/PCIe driver"
+ depends on CAN_CTUCANFD
+ depends on PCI
+ help
+	  This driver adds PCI/PCIe support for the CTU CAN-FD IP core.
+	  The FPGA design for the Intel EP4CGX15 based DB4CGX15 PCIe board,
+	  together with the PiKRON.com designed transceiver riser shield, is
+	  available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd .
+
+config CAN_CTUCANFD_PLATFORM
+ tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver"
+ depends on CAN_CTUCANFD
+ depends on OF || COMPILE_TEST
+ help
+	  The core has been tested together with an OpenCores SJA1000
+	  modified to tolerate CAN FD frames, on MicroZed Zynq based
+	  MZ_APO education kits designed by Petr Porazil from the company
+	  PiKRON.com. The FPGA design is at https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top.
+	  The kit is described at the Computer Architectures course pages,
+	  https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start .
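For reference, a configuration fragment that builds the new driver and both bus front ends as modules might look as follows. This is an illustrative assumption, not part of the patch; CONFIG_CAN and CONFIG_CAN_DEV are assumed to be enabled already, and CAN_CTUCANFD_PCI additionally requires CONFIG_PCI:

    CONFIG_CAN_CTUCANFD=m
    CONFIG_CAN_CTUCANFD_PCI=m
    CONFIG_CAN_CTUCANFD_PLATFORM=m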
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile
new file mode 100644
index 000000000000..8078f1f2c30f
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for the CTU CAN-FD IP module drivers
+#
+
+obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o
+ctucanfd-y := ctucanfd_base.o
+
+obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o
+obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o
diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h
new file mode 100644
index 000000000000..0e9904f6a05d
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#ifndef __CTUCANFD__
+#define __CTUCANFD__
+
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+#include <linux/list.h>
+
+enum ctu_can_fd_can_registers;
+
+struct ctucan_priv {
+ struct can_priv can; /* must be first member! */
+
+ void __iomem *mem_base;
+ u32 (*read_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg);
+ void (*write_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val);
+
+ unsigned int txb_head;
+ unsigned int txb_tail;
+ u32 txb_prio;
+ unsigned int ntxbufs;
+ spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct clk *can_clk;
+
+ int irq_flags;
+ unsigned long drv_flags;
+
+ u32 rxfrm_first_word;
+
+ struct list_head peers_on_pdev;
+};
+
+/**
+ * ctucan_probe_common - Device type independent registration call
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * @dev: Handle to the generic device structure
+ * @addr:	Base address of the CTU CAN FD core registers
+ * @irq: Interrupt number
+ * @ntxbufs: Number of implemented Tx buffers
+ * @can_clk_rate: Clock rate; if 0, the clock rate is taken from the device node
+ * @pm_enable_call: Whether pm_runtime_enable should be called
+ * @set_drvdata_fnc: Function to set network driver data for physical device
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ctucan_probe_common(struct device *dev, void __iomem *addr,
+ int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate,
+ int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev,
+ struct net_device *ndev));
+
+int ctucan_suspend(struct device *dev) __maybe_unused;
+int ctucan_resume(struct device *dev) __maybe_unused;
+
+#endif /*__CTUCANFD__*/
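To make the kernel-doc above easier to follow, here is a minimal, hypothetical platform-glue sketch showing how a bus front end could call ctucan_probe_common(). It is not the ctucanfd_platform.c implementation added by this patch; the function names, the resource/IRQ indices and the choice of four TX buffers are illustrative assumptions only.

    #include <linux/err.h>
    #include <linux/platform_device.h>

    #include "ctucanfd.h"

    /* Let the common code attach the net_device to the platform device. */
    static void ctucan_platform_set_drvdata_sketch(struct device *dev,
                                                   struct net_device *ndev)
    {
            struct platform_device *pdev = to_platform_device(dev);

            platform_set_drvdata(pdev, ndev);
    }

    static int ctucan_platform_probe_sketch(struct platform_device *pdev)
    {
            void __iomem *addr;
            int irq;

            /* Map the first register window of the core. */
            addr = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(addr))
                    return PTR_ERR(addr);

            /* Fetch the core interrupt line. */
            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;

            /* Four TX buffers assumed; can_clk_rate == 0 takes the clock rate
             * from the device node, pm_enable_call == 1 lets the common code
             * enable runtime PM.
             */
            return ctucan_probe_common(&pdev->dev, addr, irq, 4, 0, 1,
                                       ctucan_platform_set_drvdata_sketch);
    }

A PCI front end would differ mainly in how addr and irq are obtained and in which set_drvdata helper it passes.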
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
new file mode 100644
index 000000000000..2ada097d1ede
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -0,0 +1,1462 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+#include "ctucanfd_kregs.h"
+#include "ctucanfd_kframe.h"
+
+#ifdef DEBUG
+#define ctucan_netdev_dbg(ndev, args...) \
+ netdev_dbg(ndev, args)
+#else
+#define ctucan_netdev_dbg(...) do { } while (0)
+#endif
+
+#define CTUCANFD_ID 0xCAFD
+
+/* TX buffer rotation:
+ * - when a buffer transitions to empty state, rotate order and priorities
+ * - if more buffers seem to transition at the same time, rotate by the number of buffers
+ * - it may be assumed that buffers transition to empty state in FIFO order (because we manage
+ * priorities that way)
+ * - at frame filling, do not rotate anything, just increment buffer modulo counter
+ */
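+/* Illustrative example of the scheme above (an assumption for clarity, not
+ * part of the original comment): with four buffers initially ordered
+ * 0-1-2-3 and buffer 0 holding the highest priority, one buffer becoming
+ * empty rotates the order to 1-2-3-0; two buffers becoming empty at once
+ * rotate it to 2-3-0-1, so the oldest pending frame keeps the highest
+ * priority.
+ */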
+
+#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1
+
+#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \
+ [st] = #st
+
+enum ctucan_txtb_status {
+ TXT_NOT_EXIST = 0x0,
+ TXT_RDY = 0x1,
+ TXT_TRAN = 0x2,
+ TXT_ABTP = 0x3,
+ TXT_TOK = 0x4,
+ TXT_ERR = 0x6,
+ TXT_ABT = 0x7,
+ TXT_ETY = 0x8,
+};
+
+enum ctucan_txtb_command {
+ TXT_CMD_SET_EMPTY = 0x01,
+ TXT_CMD_SET_READY = 0x02,
+ TXT_CMD_SET_ABORT = 0x04
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 190,
+ .tseg2_min = 1,
+ .tseg2_max = 63,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 8,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 94,
+ .tseg2_min = 1,
+ .tseg2_max = 31,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 2,
+ .brp_inc = 1,
+};
+
+static const char * const ctucan_state_strings[CAN_STATE_MAX] = {
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING)
+};
+
+static void ctucan_write32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32(val, priv->mem_base + reg);
+}
+
+static void ctucan_write32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32be(val, priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32(priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32be(priv->mem_base + reg);
+}
+
+static void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val)
+{
+ priv->write_reg(priv, reg, val);
+}
+
+static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg)
+{
+ return priv->read_reg(priv, reg);
+}
+
+static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base,
+ u32 offset, u32 val)
+{
+ priv->write_reg(priv, buf_base + offset, val);
+}
+
+#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS)))
+#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE)))
+
+/**
+ * ctucan_state_to_str() - Converts CAN controller state code to corresponding text
+ * @state: CAN controller state code
+ *
+ * Return: Pointer to string representation of the error state
+ */
+static const char *ctucan_state_to_str(enum can_state state)
+{
+ const char *txt = NULL;
+
+ if (state >= 0 && state < CAN_STATE_MAX)
+ txt = ctucan_state_strings[state];
+ return txt ? txt : "UNKNOWN";
+}
+
+/**
+ * ctucan_reset() - Issues software reset request to CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset
+ */
+static int ctucan_reset(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int i = 100;
+
+ ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST);
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+
+ do {
+ u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID,
+ ctucan_read32(priv, CTUCANFD_DEVICE_ID));
+
+		if (device_id == CTUCANFD_ID)
+ return 0;
+ if (!i--) {
+ netdev_warn(ndev, "device did not leave reset\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(100, 200);
+ } while (1);
+}
+
+/**
+ * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ * @bt: Pointer to Bit timing structure
+ * @nominal: True - Nominal bit timing, False - Data bit timing
+ *
+ * Return: 0 - OK, -%EPERM if controller is enabled
+ */
+static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int max_ph1_len = 31;
+ u32 btr = 0;
+ u32 prop_seg = bt->prop_seg;
+ u32 phase_seg1 = bt->phase_seg1;
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ if (nominal)
+ max_ph1_len = 63;
+
+	/* The timing calculation functions only constrain tseg1, which is prop_seg and
+	 * phase_seg1 combined. tseg1 is then split in half and stored into prop_seg and phase_seg1.
+ * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the
+ * values here.
+ */
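+	/* Worked example (illustrative values, not taken from the patch): in
+	 * nominal mode max_ph1_len is 63, so prop_seg = 40 and phase_seg1 = 70
+	 * would be re-distributed below to prop_seg = 47 and phase_seg1 = 63,
+	 * leaving the total tseg1 of 110 time quanta unchanged.
+	 */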
+ if (phase_seg1 > max_ph1_len) {
+ prop_seg += phase_seg1 - max_ph1_len;
+ phase_seg1 = max_ph1_len;
+ bt->prop_seg = prop_seg;
+ bt->phase_seg1 = phase_seg1;
+ }
+
+ if (nominal) {
+ btr = FIELD_PREP(REG_BTR_PROP, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_BRP, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR, btr);
+ } else {
+ btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR_FD, btr);
+ }
+
+ return 0;
+}
+
+/**
+ * ctucan_set_bittiming() - CAN set nominal bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ /* Note that bt may be modified here */
+ return ctucan_set_btr(ndev, bt, true);
+}
+
+/**
+ * ctucan_set_data_bittiming() - CAN set data bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_data_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+
+ /* Note that dbt may be modified here */
+ return ctucan_set_btr(ndev, dbt, false);
+}
+
+/**
+ * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM if controller is enabled
+ */
+static int ctucan_set_secondary_sample_point(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ int ssp_offset = 0;
+ u32 ssp_cfg = 0; /* No SSP by default */
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ /* Use SSP for bit-rates above 1 Mbits/s */
+ if (dbt->bitrate > 1000000) {
+ /* Calculate SSP in minimal time quanta */
+ ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate;
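+		/* Worked example (illustrative numbers, not from the patch): with a
+		 * 100 MHz core clock, a 4 Mbit/s data bitrate and sample_point of
+		 * 750 (struct can_bittiming stores tenths of a percent, i.e. 75.0 %),
+		 * the line above yields (100000000 / 1000) * 750 / 4000000 = 18
+		 * minimal time quanta (18.75 truncated by integer division).
+		 */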
+
+ if (ssp_offset > 127) {
+ netdev_warn(ndev, "SSP offset saturated to 127\n");
+ ssp_offset = 127;
+ }
+
+ ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ }
+
+ ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
+
+ return 0;
+}
+
+/**
+ * ctucan_set_mode() - Sets the CTU CAN FD controller mode
+ * @priv: Pointer to private data
+ * @mode: Pointer to controller modes to be set
+ */
+static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode)
+{
+ u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ?
+ (mode_reg | REG_MODE_ILBP) :
+ (mode_reg & ~REG_MODE_ILBP);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ?
+ (mode_reg | REG_MODE_BMM) :
+ (mode_reg & ~REG_MODE_BMM);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD) ?
+ (mode_reg | REG_MODE_FDE) :
+ (mode_reg & ~REG_MODE_FDE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ?
+ (mode_reg | REG_MODE_ACF) :
+ (mode_reg & ~REG_MODE_ACF);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ?
+ (mode_reg | REG_MODE_NISOFD) :
+ (mode_reg & ~REG_MODE_NISOFD);
+
+ /* One shot mode supported indirectly via Retransmit limit */
+ mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF);
+ mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ?
+ (mode_reg | REG_MODE_RTRLE) :
+ (mode_reg & ~REG_MODE_RTRLE);
+
+ /* Some bits fixed:
+ * TSTM - Off, User shall not be able to change REC/TEC by hand during operation
+ */
+ mode_reg &= ~REG_MODE_TSTM;
+
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+}
+
+/**
+ * ctucan_chip_start() - This routine starts the driver
+ * @ndev: Pointer to net_device structure
+ *
+ * The routine expects the chip to be in reset state. It sets up initial
+ * Tx buffers for FIFO priorities, sets bittiming, enables interrupts,
+ * switches core to operational mode and changes controller
+ * state to %CAN_STATE_STOPPED.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_chip_start(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 int_ena, int_msk;
+ u32 mode_reg;
+ int err;
+ struct can_ctrlmode mode;
+
+ priv->txb_prio = 0x01234567;
+ priv->txb_head = 0;
+ priv->txb_tail = 0;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio);
+
+ /* Configure bit-rates and ssp */
+ err = ctucan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_data_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_secondary_sample_point(ndev);
+ if (err < 0)
+ return err;
+
+ /* Configure modes */
+ mode.flags = priv->can.ctrlmode;
+ mode.mask = 0xFFFFFFFF;
+ ctucan_set_mode(priv, &mode);
+
+ /* Configure interrupts */
+ int_ena = REG_INT_STAT_RBNEI |
+ REG_INT_STAT_TXBHCI |
+ REG_INT_STAT_EWLI |
+ REG_INT_STAT_FCSI;
+
+ /* Bus error reporting -> Allow Error/Arb.lost interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ int_ena |= REG_INT_STAT_ALI |
+ REG_INT_STAT_BEI;
+ }
+
+ int_msk = ~int_ena; /* Mask all disabled interrupts */
+
+ /* It's after reset, so there is no need to clear anything */
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
+ ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);
+
+ /* Controller enters ERROR_ACTIVE on initial FCSI */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Enable the controller */
+ mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+ mode_reg |= REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+
+ return 0;
+}
+
+/**
+ * ctucan_do_set_mode() - Sets mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: Tells the mode of the driver
+ *
+ * This checks the requested mode and calls the corresponding routines to apply it.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ return ret;
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ctucan_get_tx_status() - Gets status of TXT buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: Status of TXT buffer
+ */
+static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf)
+{
+ u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS);
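+ /* TX_STATUS holds one 4-bit state field per TXT buffer (TX1S..TX4S);
+ * shift by 4 * buffer index and mask out the state bits.
+ */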
+ enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7;
+
+ return status;
+}
+
+/**
+ * ctucan_is_txt_buf_writable() - Checks whether a frame can be inserted into the TXT Buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: True - Frame can be inserted into the TXT Buffer, False - If attempted, the frame will
+ * not be inserted into the TXT Buffer
+ */
+static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
+{
+ enum ctucan_txtb_status buf_status;
+
+ buf_status = ctucan_get_tx_status(priv, buf);
+ if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP)
+ return false;
+
+ return true;
+}
+
+/**
+ * ctucan_insert_frame() - Inserts a frame into a TXT buffer
+ * @priv: Pointer to private data
+ * @cf: Pointer to CAN frame to be inserted
+ * @buf: TXT Buffer index to which frame is inserted (0-based)
+ * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
+ *
+ * Return: True - Frame inserted successfully
+ * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in a wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
+ */
+static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
+ bool isfdf)
+{
+ u32 buf_base;
+ u32 ffw = 0;
+ u32 idw = 0;
+ unsigned int i;
+
+ if (buf >= priv->ntxbufs)
+ return false;
+
+ if (!ctucan_is_txt_buf_writable(priv, buf))
+ return false;
+
+ if (cf->len > CANFD_MAX_DLEN)
+ return false;
+
+ /* Prepare Frame format */
+ if (cf->can_id & CAN_RTR_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_IDE;
+
+ if (isfdf) {
+ ffw |= REG_FRAME_FORMAT_W_FDF;
+ if (cf->flags & CANFD_BRS)
+ ffw |= REG_FRAME_FORMAT_W_BRS;
+ }
+
+ ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len));
+
+ /* Prepare identifier */
+ if (cf->can_id & CAN_EFF_FLAG)
+ idw = cf->can_id & CAN_EFF_MASK;
+ else
+ idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK);
+
+ /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */
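+ /* TXT buffer memory windows start at offset 0x100 and are 0x100 bytes
+ * apart (TXTB1 at 0x100, TXTB2 at 0x200, ...), hence (buf + 1) * 0x100.
+ */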
+ buf_base = (buf + 1) * 0x100;
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw);
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw);
+
+ /* Write Data payload */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i += 4) {
+ u32 data = le32_to_cpu(*(__le32 *)(cf->data + i));
+
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ctucan_give_txtb_cmd() - Applies command on TXT buffer
+ * @priv: Pointer to private data
+ * @cmd: Command to give
+ * @buf: Buffer index (0-based)
+ */
+static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf)
+{
+ u32 tx_cmd = cmd;
+
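+ /* Buffer select bits TXB1..TXB4 occupy bits 8..11 of TX_COMMAND, so
+ * buffer index 'buf' maps to bit (buf + 8).
+ */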
+ tx_cmd |= 1 << (buf + 8);
+ ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd);
+}
+
+/**
+ * ctucan_start_xmit() - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
+ * populates its fields to start the transmission.
+ *
+ * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
+ * negative return values reserved for error cases
+ */
+static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 txtb_id;
+ bool ok;
+ unsigned long flags;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG!, no TXB free when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
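+ /* txb_head is a free-running counter; modulo the number of TXT buffers
+ * gives the HW buffer index to use next.
+ */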
+ txtb_id = priv->txb_head % priv->ntxbufs;
+ ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
+ ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));
+
+ if (!ok) {
+ netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
+ kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ can_put_echo_skb(skb, ndev, txtb_id, 0);
+
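+ /* Issue the buffer command and advance txb_head under tx_lock so this
+ * does not race with the TX done handler advancing txb_tail.
+ */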
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id);
+ priv->txb_head++;
+
+ /* Check if all TX buffers are full */
+ if (!CTU_CAN_FD_TXTNF(priv))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ctucan_read_rx_frame() - Reads frame from RX FIFO
+ * @priv: Pointer to CTU CAN FD's private data
+ * @cf: Pointer to CAN frame struct
+ * @ffw: Previously read frame format word
+ *
+ * Note: Frame format word must be read separately and provided in 'ffw'.
+ */
+static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
+{
+ u32 idw;
+ unsigned int i;
+ unsigned int wc;
+ unsigned int len;
+
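+ /* IDENTIFIER_W holds the base ID in bits 28:18 and the extension in bits
+ * 17:0, so for extended frames the full 29-bit identifier is read directly.
+ */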
+ idw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw))
+ cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (idw >> 18) & CAN_SFF_MASK;
+
+ /* BRS, ESI, RTR Flags */
+ cf->flags = 0;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
+ cf->flags |= CANFD_BRS;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw))
+ cf->flags |= CANFD_ESI;
+ } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
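+ /* RWCNT is the number of words in the frame following the frame format
+ * word; subtract the identifier and the two timestamp words to get the
+ * count of data words.
+ */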
+ wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3;
+
+ /* DLC */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) {
+ len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw);
+ } else {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ len = wc << 2;
+ else
+ len = 8;
+ }
+ cf->len = len;
+ if (unlikely(len > wc * 4))
+ len = wc * 4;
+
+ /* Timestamp - Read and throw away */
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+
+ /* Data */
+ for (i = 0; i < len; i += 4) {
+ u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ *(__le32 *)(cf->data + i) = cpu_to_le32(data);
+ }
+ while (unlikely(i < wc * 4)) {
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ i += 4;
+ }
+}
+
+/**
+ * ctucan_rx() - Called from CAN ISR to complete the received frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (NAPI poll) to process Rx frames. It does minimal
+ * processing and invokes "netif_receive_skb" to complete further processing.
+ * Return: 1 when a frame is passed to the network layer, 0 when the first frame word was read
+ * but the system is temporarily out of free SKBs and the allocation is deferred to a later
+ * poll, -%EAGAIN when the Rx FIFO is empty.
+ */
+static int ctucan_rx(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 ffw;
+
+ if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) {
+ ffw = priv->rxfrm_first_word;
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ } else {
+ ffw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ }
+
+ if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw))
+ return -EAGAIN;
+
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ priv->rxfrm_first_word = ffw;
+ set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ return 0;
+ }
+
+ ctucan_read_rx_frame(priv, cf, ffw);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * ctucan_read_fault_state() - Reads CTU CAN FD's fault confinement state
+ * @priv: Pointer to private data
+ *
+ * Return: Fault confinement state of the controller
+ */
+static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv)
+{
+ u32 fs;
+ u32 rec_tec;
+ u32 ewl;
+
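+ /* The EWL, ERP and FAULT_STATE registers share one 32-bit word read at the
+ * EWL offset; the ERA/ERP/BOF bits report the fault confinement state.
+ */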
+ fs = ctucan_read32(priv, CTUCANFD_EWL);
+ rec_tec = ctucan_read32(priv, CTUCANFD_REC);
+ ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs);
+
+ if (FIELD_GET(REG_EWL_ERA, fs)) {
+ if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) &&
+ ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec))
+ return CAN_STATE_ERROR_ACTIVE;
+ else
+ return CAN_STATE_ERROR_WARNING;
+ } else if (FIELD_GET(REG_EWL_ERP, fs)) {
+ return CAN_STATE_ERROR_PASSIVE;
+ } else if (FIELD_GET(REG_EWL_BOF, fs)) {
+ return CAN_STATE_BUS_OFF;
+ }
+
+ WARN(true, "Invalid error state");
+ return CAN_STATE_ERROR_PASSIVE;
+}
+
+/**
+ * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller
+ * @priv: Pointer to private data
+ * @bec: Pointer to Error counter structure
+ */
+static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec)
+{
+ u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC);
+
+ bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs);
+ bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs);
+}
+
+/**
+ * ctucan_err_interrupt() - Error frame ISR
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt handler. It checks the type of error and forwards an error
+ * frame to the upper layers.
+ */
+static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state;
+ struct can_berr_counter bec;
+ u32 err_capt_alc;
+ int dologerr = net_ratelimit();
+
+ ctucan_get_rec_tec(priv, &bec);
+ state = ctucan_read_fault_state(priv);
+ err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);
+
+ if (dologerr)
+ netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n",
+ __func__, isr, bec.rxerr, bec.txerr,
+ FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc));
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ /* EWLI: error warning limit condition met
+ * FCSI: fault confinement state changed
+ * ALI: arbitration lost (just informative)
+ * BEI: bus error interrupt
+ */
+ if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) {
+ netdev_info(ndev, "state changes from %s to %s\n",
+ ctucan_state_to_str(priv->can.state),
+ ctucan_state_to_str(state));
+
+ if (priv->can.state == state)
+ netdev_warn(ndev,
+ "current and previous state is the same! (missed interrupt?)\n");
+
+ priv->can.state = state;
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
+ default:
+ netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+ state, ctucan_state_to_str(state));
+ break;
+ }
+ }
+
+ /* Check for Arbitration Lost interrupt */
+ if (FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ if (dologerr)
+ netdev_info(ndev, "arbitration lost\n");
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for Bus Error interrupt */
+ if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
+ netdev_info(ndev, "bus error\n");
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/**
+ * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
+ * @napi: NAPI structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the NAPI poll routine for the Rx path. It processes at most @quota packets.
+ *
+ * Return: Number of packets received
+ */
+static int ctucan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int work_done = 0;
+ u32 status;
+ u32 framecnt;
+ int res = 1;
+
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ while (framecnt && work_done < quota && res > 0) {
+ res = ctucan_rx(ndev);
+ work_done++;
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ }
+
+ /* Check for RX FIFO Overflow */
+ status = ctucan_read32(priv, CTUCANFD_STATUS);
+ if (FIELD_GET(REG_STATUS_DOR, status)) {
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_info(ndev, "rx_poll: rx fifo overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+
+ /* Clear Data Overrun */
+ ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
+ }
+
+ if (work_done)
+ can_led_event(ndev, CAN_LED_EVENT_RX);
+
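+ /* Re-enable RBNEI only when the FIFO has been drained and the last
+ * ctucan_rx() call did not defer SKB allocation (res == 0); in the
+ * deferred case RBNEI stays masked and the poll routine runs again.
+ */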
+ if (!framecnt && res != 0) {
+ if (napi_complete_done(napi, work_done)) {
+ /* Clear and enable RBNEI. It is level-triggered, so
+ * there is no race condition.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI);
+ }
+ }
+
+ return work_done;
+}
+
+/**
+ * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers
+ * @ndev: net_device pointer
+ */
+static void ctucan_rotate_txb_prio(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 prio = priv->txb_prio;
+
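+ /* Each TXT buffer has a 4-bit priority nibble in TX_PRIORITY. Rotate the
+ * nibbles by one position so the just-finished buffer drops to the lowest
+ * priority and the next buffer in FIFO order gets the highest one.
+ */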
+ prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF);
+ ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio);
+ priv->txb_prio = prio;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio);
+}
+
+/**
+ * ctucan_tx_interrupt() - Tx done ISR
+ * @ndev: net_device pointer
+ */
+static void ctucan_tx_interrupt(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ bool first = true;
+ bool some_buffers_processed;
+ unsigned long flags;
+ enum ctucan_txtb_status txtb_status;
+ u32 txtb_id;
+
+ /* read tx_status
+ * if txb[n].finished (bit 2)
+ * if ok -> echo
+ * if error / aborted -> ?? (find how to handle oneshot mode)
+ * txb_tail++
+ */
+ do {
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ some_buffers_processed = false;
+ while ((int)(priv->txb_head - priv->txb_tail) > 0) {
+ txtb_id = priv->txb_tail % priv->ntxbufs;
+ txtb_status = ctucan_get_tx_status(priv, txtb_id);
+
+ ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status);
+
+ switch (txtb_status) {
+ case TXT_TOK:
+ ctucan_netdev_dbg(ndev, "TXT_OK\n");
+ stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_packets++;
+ break;
+ case TXT_ERR:
+ /* This indicates that the retransmit limit has been reached. Obviously
+ * we should not echo the frame, but also not indicate any kind of
+ * error. If desired, it was already reported (possibly multiple
+ * times) on each arbitration loss.
+ */
+ netdev_warn(ndev, "TXB in Error state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ case TXT_ABT:
+ /* Same as for TXT_ERR, only with different cause. We *could*
+ * re-queue the frame, but multiqueue/abort is not supported yet
+ * anyway.
+ */
+ netdev_warn(ndev, "TXB in Aborted state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ default:
+ /* Bug only if the first buffer is not finished, otherwise it is
+ * pretty much expected.
+ */
+ if (first) {
+ netdev_err(ndev,
+ "BUG: TXB#%u not in a finished state (0x%x)!\n",
+ txtb_id, txtb_status);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ /* do not clear nor wake */
+ return;
+ }
+ goto clear;
+ }
+ priv->txb_tail++;
+ first = false;
+ some_buffers_processed = true;
+ /* Adjust priorities *before* marking the buffer as empty. */
+ ctucan_rotate_txb_prio(ndev);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id);
+ }
+clear:
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* If no buffers were processed this time, we cannot clear - that would introduce
+ * a race condition.
+ */
+ if (some_buffers_processed) {
+ /* Clear the interrupt again. We do not want to receive the interrupt again
+ * for a buffer that was already handled. If it were the last finished one,
+ * a spurious interrupt would be logged.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI);
+ }
+ } while (some_buffers_processed);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* Check if at least one TX buffer is free */
+ if (CTU_CAN_FD_TXTNF(priv))
+ netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
+
+/**
+ * ctucan_interrupt() - CAN ISR
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * This is the CTU CAN FD ISR. It checks for the type of interrupt
+ * and invokes the corresponding ISR.
+ *
+ * Return:
+ * IRQ_NONE - If no interrupt was pending, IRQ_HANDLED otherwise
+ */
+static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 isr, icr;
+ u32 imask;
+ int irq_loops;
+
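+ /* The loop is bounded to protect against a stuck interrupt source; if the
+ * limit is reached, all interrupts are disabled and masked below.
+ */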
+ for (irq_loops = 0; irq_loops < 10000; irq_loops++) {
+ /* Get the interrupt status */
+ isr = ctucan_read32(priv, CTUCANFD_INT_STAT);
+
+ if (!isr)
+ return irq_loops ? IRQ_HANDLED : IRQ_NONE;
+
+ /* Receive Buffer Not Empty Interrupt */
+ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) {
+ ctucan_netdev_dbg(ndev, "RXBNEI\n");
+ /* Mask RBNEI first, then clear the interrupt and schedule NAPI. Even if
+ * another IRQ fires, RBNEI will always be 0 (masked).
+ */
+ icr = REG_INT_STAT_RBNEI;
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ napi_schedule(&priv->napi);
+ }
+
+ /* TXT Buffer HW Command Interrupt */
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ ctucan_netdev_dbg(ndev, "TXBHCI\n");
+ /* Cleared inside */
+ ctucan_tx_interrupt(ndev);
+ }
+
+ /* Error interrupts */
+ if (FIELD_GET(REG_INT_STAT_EWLI, isr) ||
+ FIELD_GET(REG_INT_STAT_FCSI, isr) ||
+ FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI);
+
+ ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ ctucan_err_interrupt(ndev, isr);
+ }
+ /* Ignore RI, TI, LFI, RFI, BSI */
+ }
+
+ netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr);
+
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ int i;
+
+ netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n",
+ priv->txb_head, priv->txb_tail);
+ for (i = 0; i < priv->ntxbufs; i++) {
+ u32 status = ctucan_get_tx_status(priv, i);
+
+ netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status);
+ }
+ }
+
+ imask = 0xffffffff;
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ctucan_chip_stop() - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It disables the
+ * interrupts and disables the controller.
+ */
+static void ctucan_chip_stop(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 mask = 0xffffffff;
+ u32 mode;
+
+ /* Disable interrupts and disable CAN */
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask);
+ mode = ctucan_read32(priv, CTUCANFD_MODE);
+ mode &= ~REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * ctucan_open() - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_open(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_reset;
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret) {
+ netdev_warn(ndev, "open_candev failed!\n");
+ goto err_open;
+ }
+
+ ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err_irq;
+ }
+
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ goto err_chip_start;
+ }
+
+ netdev_info(ndev, "ctu_can_fd device registered\n");
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_chip_start:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ close_candev(ndev);
+err_open:
+err_reset:
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+/**
+ * ctucan_close() - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int ctucan_close(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ ctucan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+/**
+ * ctucan_get_berr_counter() - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ctucan_get_rec_tec(priv, bec);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+static const struct net_device_ops ctucan_netdev_ops = {
+ .ndo_open = ctucan_open,
+ .ndo_stop = ctucan_close,
+ .ndo_start_xmit = ctucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+int ctucan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_suspend);
+
+int ctucan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_resume);
+
+int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate, int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev))
+{
+ struct ctucan_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ spin_lock_init(&priv->tx_lock);
+ INIT_LIST_HEAD(&priv->peers_on_pdev);
+ priv->ntxbufs = ntxbufs;
+ priv->dev = dev;
+ priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
+ priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.do_set_mode = ctucan_do_set_mode;
+
+ /* Needed for timing adjustment to be performed as soon as possible */
+ priv->can.do_set_bittiming = ctucan_set_bittiming;
+ priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+
+ priv->can.do_get_berr_counter = ctucan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
+ | CAN_CTRLMODE_LISTENONLY
+ | CAN_CTRLMODE_FD
+ | CAN_CTRLMODE_PRESUME_ACK
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_FD_NON_ISO
+ | CAN_CTRLMODE_ONE_SHOT;
+ priv->mem_base = addr;
+
+ /* Get IRQ for the device */
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ if (set_drvdata_fnc)
+ set_drvdata_fnc(dev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &ctucan_netdev_ops;
+
+ /* Getting the can_clk info */
+ if (!can_clk_rate) {
+ priv->can_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ can_clk_rate = clk_get_rate(priv->can_clk);
+ }
+
+ priv->write_reg = ctucan_write32_le;
+ priv->read_reg = ctucan_read32_le;
+
+ if (pm_enable_call)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ goto err_pmdisable;
+ }
+
+ /* Check for big endianness and set the matching I/O accessors */
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ priv->write_reg = ctucan_write32_be;
+ priv->read_reg = ctucan_read32_be;
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ netdev_err(ndev, "CTU_CAN_FD signature not found\n");
+ ret = -ENODEV;
+ goto err_deviceoff;
+ }
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_deviceoff;
+
+ priv->can.clock.freq = can_clk_rate;
+
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "fail to register failed (err=%d)\n", ret);
+ goto err_deviceoff;
+ }
+
+ devm_can_led_init(ndev);
+
+ pm_runtime_put(dev);
+
+ netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n",
+ priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs);
+
+ return 0;
+
+err_deviceoff:
+ pm_runtime_put(priv->dev);
+err_pmdisable:
+ if (pm_enable_call)
+ pm_runtime_disable(dev);
+err_free:
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ return ret;
+}
+EXPORT_SYMBOL(ctucan_probe_common);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>");
+MODULE_DESCRIPTION("CTU CAN FD interface");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
new file mode 100644
index 000000000000..3491299eaac2
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+
+#include <linux/bits.h>
+
+/* CAN_Frame_format memory map */
+enum ctu_can_fd_can_frame_format {
+ CTUCANFD_FRAME_FORMAT_W = 0x0,
+ CTUCANFD_IDENTIFIER_W = 0x4,
+ CTUCANFD_TIMESTAMP_L_W = 0x8,
+ CTUCANFD_TIMESTAMP_U_W = 0xc,
+ CTUCANFD_DATA_1_4_W = 0x10,
+ CTUCANFD_DATA_5_8_W = 0x14,
+ CTUCANFD_DATA_61_64_W = 0x4c,
+};
+
+/* CAN_FD_Frame_format memory region */
+
+/* FRAME_FORMAT_W registers */
+#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0)
+#define REG_FRAME_FORMAT_W_RTR BIT(5)
+#define REG_FRAME_FORMAT_W_IDE BIT(6)
+#define REG_FRAME_FORMAT_W_FDF BIT(7)
+#define REG_FRAME_FORMAT_W_BRS BIT(9)
+#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10)
+#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11)
+
+/* IDENTIFIER_W registers */
+#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0)
+#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18)
+
+/* TIMESTAMP_L_W registers */
+#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0)
+
+/* TIMESTAMP_U_W registers */
+#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0)
+
+/* DATA_1_4_W registers */
+#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0)
+#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8)
+#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16)
+#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24)
+
+/* DATA_5_8_W registers */
+#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0)
+#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8)
+#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16)
+#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24)
+
+/* DATA_61_64_W registers */
+#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0)
+#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8)
+#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16)
+#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
new file mode 100644
index 000000000000..edc1c1a24348
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+
+#include <linux/bits.h>
+
+/* CAN_Registers memory map */
+enum ctu_can_fd_can_registers {
+ CTUCANFD_DEVICE_ID = 0x0,
+ CTUCANFD_VERSION = 0x2,
+ CTUCANFD_MODE = 0x4,
+ CTUCANFD_SETTINGS = 0x6,
+ CTUCANFD_STATUS = 0x8,
+ CTUCANFD_COMMAND = 0xc,
+ CTUCANFD_INT_STAT = 0x10,
+ CTUCANFD_INT_ENA_SET = 0x14,
+ CTUCANFD_INT_ENA_CLR = 0x18,
+ CTUCANFD_INT_MASK_SET = 0x1c,
+ CTUCANFD_INT_MASK_CLR = 0x20,
+ CTUCANFD_BTR = 0x24,
+ CTUCANFD_BTR_FD = 0x28,
+ CTUCANFD_EWL = 0x2c,
+ CTUCANFD_ERP = 0x2d,
+ CTUCANFD_FAULT_STATE = 0x2e,
+ CTUCANFD_REC = 0x30,
+ CTUCANFD_TEC = 0x32,
+ CTUCANFD_ERR_NORM = 0x34,
+ CTUCANFD_ERR_FD = 0x36,
+ CTUCANFD_CTR_PRES = 0x38,
+ CTUCANFD_FILTER_A_MASK = 0x3c,
+ CTUCANFD_FILTER_A_VAL = 0x40,
+ CTUCANFD_FILTER_B_MASK = 0x44,
+ CTUCANFD_FILTER_B_VAL = 0x48,
+ CTUCANFD_FILTER_C_MASK = 0x4c,
+ CTUCANFD_FILTER_C_VAL = 0x50,
+ CTUCANFD_FILTER_RAN_LOW = 0x54,
+ CTUCANFD_FILTER_RAN_HIGH = 0x58,
+ CTUCANFD_FILTER_CONTROL = 0x5c,
+ CTUCANFD_FILTER_STATUS = 0x5e,
+ CTUCANFD_RX_MEM_INFO = 0x60,
+ CTUCANFD_RX_POINTERS = 0x64,
+ CTUCANFD_RX_STATUS = 0x68,
+ CTUCANFD_RX_SETTINGS = 0x6a,
+ CTUCANFD_RX_DATA = 0x6c,
+ CTUCANFD_TX_STATUS = 0x70,
+ CTUCANFD_TX_COMMAND = 0x74,
+ CTUCANFD_TX_PRIORITY = 0x78,
+ CTUCANFD_ERR_CAPT = 0x7c,
+ CTUCANFD_ALC = 0x7e,
+ CTUCANFD_TRV_DELAY = 0x80,
+ CTUCANFD_SSP_CFG = 0x82,
+ CTUCANFD_RX_FR_CTR = 0x84,
+ CTUCANFD_TX_FR_CTR = 0x88,
+ CTUCANFD_DEBUG_REGISTER = 0x8c,
+ CTUCANFD_YOLO_REG = 0x90,
+ CTUCANFD_TIMESTAMP_LOW = 0x94,
+ CTUCANFD_TIMESTAMP_HIGH = 0x98,
+ CTUCANFD_TXTB1_DATA_1 = 0x100,
+ CTUCANFD_TXTB1_DATA_2 = 0x104,
+ CTUCANFD_TXTB1_DATA_20 = 0x14c,
+ CTUCANFD_TXTB2_DATA_1 = 0x200,
+ CTUCANFD_TXTB2_DATA_2 = 0x204,
+ CTUCANFD_TXTB2_DATA_20 = 0x24c,
+ CTUCANFD_TXTB3_DATA_1 = 0x300,
+ CTUCANFD_TXTB3_DATA_2 = 0x304,
+ CTUCANFD_TXTB3_DATA_20 = 0x34c,
+ CTUCANFD_TXTB4_DATA_1 = 0x400,
+ CTUCANFD_TXTB4_DATA_2 = 0x404,
+ CTUCANFD_TXTB4_DATA_20 = 0x44c,
+};
+
+/* Control_registers memory region */
+
+/* DEVICE_ID VERSION registers */
+#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0)
+#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16)
+#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24)
+
+/* MODE SETTINGS registers */
+#define REG_MODE_RST BIT(0)
+#define REG_MODE_BMM BIT(1)
+#define REG_MODE_STM BIT(2)
+#define REG_MODE_AFM BIT(3)
+#define REG_MODE_FDE BIT(4)
+#define REG_MODE_ACF BIT(7)
+#define REG_MODE_TSTM BIT(8)
+#define REG_MODE_RTRLE BIT(16)
+#define REG_MODE_RTRTH GENMASK(20, 17)
+#define REG_MODE_ILBP BIT(21)
+#define REG_MODE_ENA BIT(22)
+#define REG_MODE_NISOFD BIT(23)
+#define REG_MODE_PEX BIT(24)
+#define REG_MODE_TBFBO BIT(25)
+#define REG_MODE_FDRF BIT(26)
+
+/* STATUS registers */
+#define REG_STATUS_RXNE BIT(0)
+#define REG_STATUS_DOR BIT(1)
+#define REG_STATUS_TXNF BIT(2)
+#define REG_STATUS_EFT BIT(3)
+#define REG_STATUS_RXS BIT(4)
+#define REG_STATUS_TXS BIT(5)
+#define REG_STATUS_EWL BIT(6)
+#define REG_STATUS_IDLE BIT(7)
+#define REG_STATUS_PEXS BIT(8)
+
+/* COMMAND registers */
+#define REG_COMMAND_RRB BIT(2)
+#define REG_COMMAND_CDO BIT(3)
+#define REG_COMMAND_ERCRST BIT(4)
+#define REG_COMMAND_RXFCRST BIT(5)
+#define REG_COMMAND_TXFCRST BIT(6)
+#define REG_COMMAND_CPEXS BIT(7)
+
+/* INT_STAT registers */
+#define REG_INT_STAT_RXI BIT(0)
+#define REG_INT_STAT_TXI BIT(1)
+#define REG_INT_STAT_EWLI BIT(2)
+#define REG_INT_STAT_DOI BIT(3)
+#define REG_INT_STAT_FCSI BIT(4)
+#define REG_INT_STAT_ALI BIT(5)
+#define REG_INT_STAT_BEI BIT(6)
+#define REG_INT_STAT_OFI BIT(7)
+#define REG_INT_STAT_RXFI BIT(8)
+#define REG_INT_STAT_BSI BIT(9)
+#define REG_INT_STAT_RBNEI BIT(10)
+#define REG_INT_STAT_TXBHCI BIT(11)
+
+/* INT_ENA_SET registers */
+#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0)
+
+/* INT_ENA_CLR registers */
+#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0)
+
+/* INT_MASK_SET registers */
+#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0)
+
+/* INT_MASK_CLR registers */
+#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0)
+
+/* BTR registers */
+#define REG_BTR_PROP GENMASK(6, 0)
+#define REG_BTR_PH1 GENMASK(12, 7)
+#define REG_BTR_PH2 GENMASK(18, 13)
+#define REG_BTR_BRP GENMASK(26, 19)
+#define REG_BTR_SJW GENMASK(31, 27)
+
+/* BTR_FD registers */
+#define REG_BTR_FD_PROP_FD GENMASK(5, 0)
+#define REG_BTR_FD_PH1_FD GENMASK(11, 7)
+#define REG_BTR_FD_PH2_FD GENMASK(17, 13)
+#define REG_BTR_FD_BRP_FD GENMASK(26, 19)
+#define REG_BTR_FD_SJW_FD GENMASK(31, 27)
+
+/* EWL ERP FAULT_STATE registers */
+#define REG_EWL_EW_LIMIT GENMASK(7, 0)
+#define REG_EWL_ERP_LIMIT GENMASK(15, 8)
+#define REG_EWL_ERA BIT(16)
+#define REG_EWL_ERP BIT(17)
+#define REG_EWL_BOF BIT(18)
+
+/* REC TEC registers */
+#define REG_REC_REC_VAL GENMASK(8, 0)
+#define REG_REC_TEC_VAL GENMASK(24, 16)
+
+/* ERR_NORM ERR_FD registers */
+#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0)
+#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16)
+
+/* CTR_PRES registers */
+#define REG_CTR_PRES_CTPV GENMASK(8, 0)
+#define REG_CTR_PRES_PTX BIT(9)
+#define REG_CTR_PRES_PRX BIT(10)
+#define REG_CTR_PRES_ENORM BIT(11)
+#define REG_CTR_PRES_EFD BIT(12)
+
+/* FILTER_A_MASK registers */
+#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0)
+
+/* FILTER_A_VAL registers */
+#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0)
+
+/* FILTER_B_MASK registers */
+#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0)
+
+/* FILTER_B_VAL registers */
+#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0)
+
+/* FILTER_C_MASK registers */
+#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0)
+
+/* FILTER_C_VAL registers */
+#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_LOW registers */
+#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_HIGH registers */
+#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0)
+
+/* FILTER_CONTROL FILTER_STATUS registers */
+#define REG_FILTER_CONTROL_FANB BIT(0)
+#define REG_FILTER_CONTROL_FANE BIT(1)
+#define REG_FILTER_CONTROL_FAFB BIT(2)
+#define REG_FILTER_CONTROL_FAFE BIT(3)
+#define REG_FILTER_CONTROL_FBNB BIT(4)
+#define REG_FILTER_CONTROL_FBNE BIT(5)
+#define REG_FILTER_CONTROL_FBFB BIT(6)
+#define REG_FILTER_CONTROL_FBFE BIT(7)
+#define REG_FILTER_CONTROL_FCNB BIT(8)
+#define REG_FILTER_CONTROL_FCNE BIT(9)
+#define REG_FILTER_CONTROL_FCFB BIT(10)
+#define REG_FILTER_CONTROL_FCFE BIT(11)
+#define REG_FILTER_CONTROL_FRNB BIT(12)
+#define REG_FILTER_CONTROL_FRNE BIT(13)
+#define REG_FILTER_CONTROL_FRFB BIT(14)
+#define REG_FILTER_CONTROL_FRFE BIT(15)
+#define REG_FILTER_CONTROL_SFA BIT(16)
+#define REG_FILTER_CONTROL_SFB BIT(17)
+#define REG_FILTER_CONTROL_SFC BIT(18)
+#define REG_FILTER_CONTROL_SFR BIT(19)
+
+/* RX_MEM_INFO registers */
+#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0)
+#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16)
+
+/* RX_POINTERS registers */
+#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0)
+#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16)
+
+/* RX_STATUS RX_SETTINGS registers */
+#define REG_RX_STATUS_RXE BIT(0)
+#define REG_RX_STATUS_RXF BIT(1)
+#define REG_RX_STATUS_RXMOF BIT(2)
+#define REG_RX_STATUS_RXFRC GENMASK(14, 4)
+#define REG_RX_STATUS_RTSOP BIT(16)
+
+/* RX_DATA registers */
+#define REG_RX_DATA_RX_DATA GENMASK(31, 0)
+
+/* TX_STATUS registers */
+#define REG_TX_STATUS_TX1S GENMASK(3, 0)
+#define REG_TX_STATUS_TX2S GENMASK(7, 4)
+#define REG_TX_STATUS_TX3S GENMASK(11, 8)
+#define REG_TX_STATUS_TX4S GENMASK(15, 12)
+
+/* TX_COMMAND registers */
+#define REG_TX_COMMAND_TXCE BIT(0)
+#define REG_TX_COMMAND_TXCR BIT(1)
+#define REG_TX_COMMAND_TXCA BIT(2)
+#define REG_TX_COMMAND_TXB1 BIT(8)
+#define REG_TX_COMMAND_TXB2 BIT(9)
+#define REG_TX_COMMAND_TXB3 BIT(10)
+#define REG_TX_COMMAND_TXB4 BIT(11)
+
+/* TX_PRIORITY registers */
+#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0)
+#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4)
+#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8)
+#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12)
+
+/* ERR_CAPT ALC registers */
+#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0)
+#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5)
+#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16)
+#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21)
+
+/* TRV_DELAY SSP_CFG registers */
+#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0)
+#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16)
+#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24)
+
+/* RX_FR_CTR registers */
+#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0)
+
+/* TX_FR_CTR registers */
+#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0)
+
+/* DEBUG_REGISTER registers */
+#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0)
+#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3)
+#define REG_DEBUG_REGISTER_PC_ARB BIT(6)
+#define REG_DEBUG_REGISTER_PC_CON BIT(7)
+#define REG_DEBUG_REGISTER_PC_DAT BIT(8)
+#define REG_DEBUG_REGISTER_PC_STC BIT(9)
+#define REG_DEBUG_REGISTER_PC_CRC BIT(10)
+#define REG_DEBUG_REGISTER_PC_CRCD BIT(11)
+#define REG_DEBUG_REGISTER_PC_ACK BIT(12)
+#define REG_DEBUG_REGISTER_PC_ACKD BIT(13)
+#define REG_DEBUG_REGISTER_PC_EOF BIT(14)
+#define REG_DEBUG_REGISTER_PC_INT BIT(15)
+#define REG_DEBUG_REGISTER_PC_SUSP BIT(16)
+#define REG_DEBUG_REGISTER_PC_OVR BIT(17)
+#define REG_DEBUG_REGISTER_PC_SOF BIT(18)
+
+/* YOLO_REG registers */
+#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0)
+
+/* TIMESTAMP_LOW registers */
+#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0)
+
+/* TIMESTAMP_HIGH registers */
+#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c
new file mode 100644
index 000000000000..8f2956a8ae43
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ctucanfd.h"
+
+#ifndef PCI_DEVICE_DATA
+#define PCI_DEVICE_DATA(vend, dev, data) \
+.vendor = PCI_VENDOR_ID_##vend, \
+.device = PCI_DEVICE_ID_##vend##_##dev, \
+.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+.driver_data = (kernel_ulong_t)(data)
+#endif
+
+#ifndef PCI_VENDOR_ID_TEDIA
+#define PCI_VENDOR_ID_TEDIA 0x1760
+#endif
+
+#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21
+#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00
+#endif
+
+#define CTUCAN_BAR0_CTUCAN_ID 0x0000
+#define CTUCAN_BAR0_CRA_BASE 0x4000
+#define CYCLONE_IV_CRA_A2P_IE (0x0050)
+
+#define CTUCAN_WITHOUT_CTUCAN_ID 0
+#define CTUCAN_WITH_CTUCAN_ID 1
+
+struct ctucan_pci_board_data {
+ void __iomem *bar0_base;
+ void __iomem *cra_base;
+ void __iomem *bar1_base;
+ struct list_head ndev_list_head;
+ int use_msi;
+};
+
+static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev)
+{
+ return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev);
+}
+
+static void ctucan_pci_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ list_add(&priv->peers_on_pdev, &bdata->ndev_list_head);
+ priv->irq_flags = IRQF_SHARED;
+}
+
+/**
+ * ctucan_pci_probe - PCI registration call
+ * @pdev: Handle to the pci device structure
+ * @ent: Pointer to the entry from ctucan_pci_tbl
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long driver_data = ent->driver_data;
+ struct ctucan_pci_board_data *bdata;
+ void __iomem *addr;
+ void __iomem *cra_addr;
+ void __iomem *bar0_base;
+ u32 cra_a2p_ie;
+ u32 ctucan_id = 0;
+ int ret;
+ unsigned int ntxbufs;
+ unsigned int num_cores = 1;
+ unsigned int core_i = 0;
+ int irq;
+ int msi_ok = 0;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device FAILED\n");
+ goto err;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(dev, "pci_request_regions FAILED\n");
+ goto err_disable_device;
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ dev_info(dev, "MSI enabled\n");
+ pci_set_master(pdev);
+ msi_ok = 1;
+ }
+
+ dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 0),
+ (long long)pci_resource_len(pdev, 0));
+
+ dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 1),
+ (long long)pci_resource_len(pdev, 1));
+
+ addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!addr) {
+ dev_err(dev, "PCI BAR 1 cannot be mapped\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Cyclone IV PCI Express Control Registers Area */
+ bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!bar0_base) {
+ dev_err(dev, "PCI BAR 0 cannot be mapped\n");
+ ret = -EIO;
+ goto err_pci_iounmap_bar1;
+ }
+
+ if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) {
+ cra_addr = bar0_base;
+ num_cores = 2;
+ } else {
+ cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE;
+ ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID);
+ dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id);
+ num_cores = ctucan_id & 0xf;
+ }
+
+ irq = pdev->irq;
+
+ ntxbufs = 4;
+
+ bdata = kzalloc(sizeof(*bdata), GFP_KERNEL);
+ if (!bdata) {
+ ret = -ENOMEM;
+ goto err_pci_iounmap_bar0;
+ }
+
+ INIT_LIST_HEAD(&bdata->ndev_list_head);
+ bdata->bar0_base = bar0_base;
+ bdata->cra_base = cra_addr;
+ bdata->bar1_base = addr;
+ bdata->use_msi = msi_ok;
+
+ pci_set_drvdata(pdev, bdata);
+
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0)
+ goto err_free_board;
+
+ core_i++;
+
+ while (core_i < num_cores) {
+ addr += 0x4000;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0) {
+ dev_info(dev, "CTU CAN FD core %d initialization failed\n",
+ core_i);
+ break;
+ }
+ core_i++;
+ }
+
+ /* enable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+ cra_a2p_ie |= 1;
+ iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+
+ return 0;
+
+err_free_board:
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+err_pci_iounmap_bar0:
+ pci_iounmap(pdev, cra_addr);
+err_pci_iounmap_bar1:
+ pci_iounmap(pdev, addr);
+err_release_regions:
+ if (msi_ok) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
+
+/**
+ * ctucan_pci_remove - Unregister the device and release its resources
+ * @pdev: Handle to the pci device structure
+ *
+ * This function frees all the resources allocated to the device.
+ */
+static void ctucan_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ctucan_priv *priv = NULL;
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ dev_dbg(&pdev->dev, "ctucan_remove");
+
+ if (!bdata) {
+ dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
+ return;
+ }
+
+ /* disable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ if (bdata->cra_base)
+ iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);
+
+ while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv,
+ peers_on_pdev)) != NULL) {
+ ndev = priv->can.dev;
+
+ unregister_candev(ndev);
+
+ netif_napi_del(&priv->napi);
+
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ }
+
+ pci_iounmap(pdev, bdata->bar1_base);
+
+ if (bdata->use_msi) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ pci_iounmap(pdev, bdata->bar0_base);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);
+
+static const struct pci_device_id ctucan_pci_tbl[] = {
+ {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21,
+ CTUCAN_WITH_CTUCAN_ID)},
+ {},
+};
+
+static struct pci_driver ctucan_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ctucan_pci_tbl,
+ .probe = ctucan_pci_probe,
+ .remove = ctucan_pci_remove,
+ .driver.pm = &ctucan_pci_pm_ops,
+};
+
+module_pci_driver(ctucan_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
new file mode 100644
index 000000000000..89d54c2151e1
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+
+#define DRV_NAME "ctucanfd"
+
+static void ctucan_platform_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+
+ platform_set_drvdata(pdev, ndev);
+}
+
+/**
+ * ctucan_platform_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_platform_probe(struct platform_device *pdev)
+{
+ struct resource *res; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ int ret;
+ unsigned int ntxbufs;
+ int irq;
+
+ /* Get the virtual base address for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr)) {
+ dev_err(dev, "Cannot remap address.\n");
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err;
+ }
+
+ /* The number of Tx buffers might change in HW in the future. If so,
+ * it will be passed as a property via the device tree.
+ */
+ ntxbufs = 4;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
+ 1, ctucan_platform_set_drvdata);
+
+ if (ret < 0)
+ platform_set_drvdata(pdev, NULL);
+
+err:
+ return ret;
+}
+
+/**
+ * ctucan_platform_remove - Unregister the device and release its resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int ctucan_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "ctucan_remove");
+
+ unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume);
+
+/* Match table for OF platform binding */
+static const struct of_device_id ctucan_of_match[] = {
+ { .compatible = "ctu,ctucanfd-2", },
+ { .compatible = "ctu,ctucanfd", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, ctucan_of_match);
+
+static struct platform_driver ctucanfd_driver = {
+ .probe = ctucan_platform_probe,
+ .remove = ctucan_platform_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &ctucan_platform_pm_ops,
+ .of_match_table = ctucan_of_match,
+ },
+};
+
+module_platform_driver(ctucanfd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek");
+MODULE_DESCRIPTION("CTU CAN FD for platform");
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 2103bcca9012..c1e76f0a5064 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -116,7 +116,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
can_update_sample_point(btc, sample_point_nominal, tseg / 2,
&tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
+ if (sample_point_error >= best_sample_point_error)
continue;
best_sample_point_error = sample_point_error;
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 7f80d8e1e750..6d0dc18c03e7 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -221,7 +221,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
@@ -240,7 +240,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
return 0;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp,
@@ -256,7 +256,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
if (!skb)
return 0;
- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 74d7fcbfd065..fe9bda0f5ec4 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -723,11 +723,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = __flexcan_get_berr_counter(dev, bec);
@@ -845,7 +843,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -892,7 +890,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -1700,11 +1698,9 @@ static int flexcan_open(struct net_device *dev)
return -EINVAL;
}
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = open_candev(dev);
if (err)
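
The flexcan hunks above replace the open-coded pm_runtime_get_sync() plus
pm_runtime_put_noidle()-on-error pattern with pm_runtime_resume_and_get(),
which performs exactly that cleanup internally. A hedged sketch of a caller
after the conversion; example_open() and the device access are hypothetical,
only the PM runtime calls are real API:

    #include <linux/pm_runtime.h>

    static int example_open(struct device *dev)
    {
        int err;

        /* On failure the usage counter has already been dropped,
         * so no pm_runtime_put_noidle() is needed here.
         */
        err = pm_runtime_resume_and_get(dev);
        if (err < 0)
            return err;

        /* ... access the now-resumed device ... */

        pm_runtime_put(dev);
        return 0;
    }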
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b3b5bc1c803b..e6d2da4a9f41 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -77,9 +77,6 @@ enum m_can_reg {
M_CAN_TXEFA = 0xf8,
};
-/* napi related */
-#define M_CAN_NAPI_WEIGHT 64
-
/* message ram configuration data length */
#define MRAM_CFG_LEN 8
@@ -464,7 +461,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
struct net_device_stats *stats = &cdev->net->stats;
int err;
- err = can_rx_offload_queue_sorted(&cdev->offload, skb,
+ err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -951,7 +948,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done;
- work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
+ work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT);
/* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure).
@@ -1474,7 +1471,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (!cdev->is_peripheral)
netif_napi_add(dev, &cdev->napi,
- m_can_poll, M_CAN_NAPI_WEIGHT);
+ m_can_poll, NAPI_POLL_WEIGHT);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
@@ -1994,7 +1991,7 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
- M_CAN_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
if (ret)
goto clk_disable;
}
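
These m_can hunks drop the driver-private M_CAN_NAPI_WEIGHT define in favour
of the core NAPI_POLL_WEIGHT constant (also 64). A minimal sketch of a poll
callback registered with that weight, using the four-argument
netif_napi_add() signature still in use at this point in the tree; the
example_* names and the empty poll body are placeholders:

    #include <linux/netdevice.h>

    static int example_poll(struct napi_struct *napi, int budget)
    {
        int work_done = 0;

        /* ... process up to 'budget' packets here ... */

        if (work_done < budget)
            napi_complete_done(napi, work_done);
        return work_done;
    }

    static void example_napi_setup(struct net_device *ndev,
                                   struct napi_struct *napi)
    {
        netif_napi_add(ndev, napi, example_poll, NAPI_POLL_WEIGHT);
    }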
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index de4ddf79ba9b..65ba6697bd7d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <linux/clk.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 110071b26921..4b2f9cb17fc3 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -107,7 +107,7 @@ config CAN_TSCAN1
depends on ISA
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ https://www.embeddedts.com/products/TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 3dbba8d61afb..f3862bed3d40 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -5,10 +5,9 @@
* Copyright 2010 Andre B. Oliveira
*/
-/*
- * References:
- * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
- * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+/* References:
+ * - Getting started with TS-CAN1, Technologic Systems, Feb 2022
+ * https://docs.embeddedts.com/TS-CAN1
*/
#include <linux/init.h>
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index f9dd8fdba12b..b21252390216 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -37,6 +37,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
.model = MCP251XFD_MODEL_MCP2518FD,
};
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251863,
+};
+
/* Autodetect model, start with CRC enabled. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
@@ -75,6 +81,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251863:
+ return "MCP251863";
case MCP251XFD_MODEL_MCP251XFD:
return "MCP251xFD";
}
@@ -916,7 +924,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1021,7 +1029,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
return 0;
mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1094,7 +1102,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1259,7 +1267,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
* - for mcp2518fd: offset not 0 or 1
*/
if (chip_tx_tail != tx_tail ||
- !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) ||
+ mcp251xfd_is_251863(priv))))) {
netdev_err(priv->ndev,
"ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
@@ -1697,7 +1706,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
else
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
- if (!mcp251xfd_is_251X(priv) &&
+ if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
netdev_info(ndev,
"Detected %s, but firmware specifies a %s. Fixing up.\n",
@@ -1930,6 +1939,9 @@ static const struct of_device_id mcp251xfd_of_match[] = {
.compatible = "microchip,mcp2518fd",
.data = &mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .compatible = "microchip,mcp251863",
+ .data = &mcp251xfd_devtype_data_mcp251863,
+ }, {
.compatible = "microchip,mcp251xfd",
.data = &mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -1946,6 +1958,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = {
.name = "mcp2518fd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .name = "mcp251863",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863,
+ }, {
.name = "mcp251xfd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
}, {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index d09f7fbf2ba7..ced8d9c81f8c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -173,7 +173,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
}
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
stats->rx_fifo_errors++;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 9cb6b5ad8dda..1d43bccc29bf 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -586,7 +586,8 @@ struct mcp251xfd_regs_status {
enum mcp251xfd_model {
MCP251XFD_MODEL_MCP2517FD = 0x2517,
MCP251XFD_MODEL_MCP2518FD = 0x2518,
- MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+ MCP251XFD_MODEL_MCP251863 = 0x251863,
+ MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */
};
struct mcp251xfd_devtype_data {
@@ -659,12 +660,13 @@ struct mcp251xfd_priv {
static inline bool \
mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
{ \
- return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \
}
-MCP251XFD_IS(2517);
-MCP251XFD_IS(2518);
-MCP251XFD_IS(251X);
+MCP251XFD_IS(2517FD);
+MCP251XFD_IS(2518FD);
+MCP251XFD_IS(251863);
+MCP251XFD_IS(251XFD);
static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
{
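
The MCP251XFD_IS() rework above moves the "FD" suffix out of the macro and
into its arguments, so a part without that suffix (the MCP251863) can reuse
the same token-pasting helper. A standalone illustration of the pattern; the
model values mirror the header, everything else is made up for the example:

    #include <stdbool.h>
    #include <stdio.h>

    enum model {
        MODEL_MCP2517FD = 0x2517,
        MODEL_MCP2518FD = 0x2518,
        MODEL_MCP251863 = 0x251863,
    };

    struct priv { enum model model; };

    /* The argument now carries the full suffix, so "251863" fits too. */
    #define IS_MODEL(_m) \
    static bool is_##_m(const struct priv *p) \
    { \
        return p->model == MODEL_MCP##_m; \
    }

    IS_MODEL(2518FD)
    IS_MODEL(251863)

    int main(void)
    {
        struct priv p = { .model = MODEL_MCP251863 };

        printf("%d %d\n", is_2518FD(&p), is_251863(&p));
        return 0;
    }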
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index ff31b993ab17..bb3f2e3b004c 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -633,7 +633,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb,
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
@@ -668,7 +668,7 @@ static void ti_hecc_change_state(struct net_device *ndev,
}
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index e562c5ab1149..43f0c6a064ba 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -239,7 +239,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -265,7 +265,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index b2752978cb09..f91deea9368e 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1027,40 +1027,7 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p;
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
-
- p = &dev->ports[port];
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
+ ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
}
static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index d74defcd86b4..4109433b6b6c 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -160,9 +160,6 @@
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
#define PORT_BACK_PRESSURE BIT(3)
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
#define REG_PORT_1_CTRL_3 0x13
#define REG_PORT_2_CTRL_3 0x23
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 7310d19d1f06..61dd0fa97748 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -65,100 +65,6 @@ static const struct {
{ 0x83, "tx_discards" },
};
-struct ksz9477_stats_raw {
- u64 rx_hi;
- u64 rx_undersize;
- u64 rx_fragments;
- u64 rx_oversize;
- u64 rx_jabbers;
- u64 rx_symbol_err;
- u64 rx_crc_err;
- u64 rx_align_err;
- u64 rx_mac_ctrl;
- u64 rx_pause;
- u64 rx_bcast;
- u64 rx_mcast;
- u64 rx_ucast;
- u64 rx_64_or_less;
- u64 rx_65_127;
- u64 rx_128_255;
- u64 rx_256_511;
- u64 rx_512_1023;
- u64 rx_1024_1522;
- u64 rx_1523_2000;
- u64 rx_2001;
- u64 tx_hi;
- u64 tx_late_col;
- u64 tx_pause;
- u64 tx_bcast;
- u64 tx_mcast;
- u64 tx_ucast;
- u64 tx_deferred;
- u64 tx_total_col;
- u64 tx_exc_col;
- u64 tx_single_col;
- u64 tx_mult_col;
- u64 rx_total;
- u64 tx_total;
- u64 rx_discards;
- u64 tx_discards;
-};
-
-static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port)
-{
- struct rtnl_link_stats64 *stats;
- struct ksz9477_stats_raw *raw;
- struct ksz_port_mib *mib;
-
- mib = &dev->ports[port].mib;
- stats = &mib->stats64;
- raw = (struct ksz9477_stats_raw *)mib->counters;
-
- spin_lock(&mib->stats64_lock);
-
- stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
- stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
-
- /* HW counters are counting bytes + FCS which is not acceptable
- * for rtnl_link_stats64 interface
- */
- stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
- stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
-
- stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
- raw->rx_oversize;
-
- stats->rx_crc_errors = raw->rx_crc_err;
- stats->rx_frame_errors = raw->rx_align_err;
- stats->rx_dropped = raw->rx_discards;
- stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_dropped;
-
- stats->tx_window_errors = raw->tx_late_col;
- stats->tx_fifo_errors = raw->tx_discards;
- stats->tx_aborted_errors = raw->tx_exc_col;
- stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
- stats->tx_aborted_errors;
-
- stats->multicast = raw->rx_mcast;
- stats->collisions = raw->tx_total_col;
-
- spin_unlock(&mib->stats64_lock);
-}
-
-static void ksz9477_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *s)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz_port_mib *mib;
-
- mib = &dev->ports[port].mib;
-
- spin_lock(&mib->stats64_lock);
- memcpy(s, &mib->stats64, sizeof(*s));
- spin_unlock(&mib->stats64_lock);
-}
-
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -517,38 +423,7 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
+ ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
}
static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1523,7 +1398,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
.port_mirror_del = ksz9477_port_mirror_del,
- .get_stats64 = ksz9477_get_stats64,
+ .get_stats64 = ksz_get_stats64,
.port_change_mtu = ksz9477_change_mtu,
.port_max_mtu = ksz9477_max_mtu,
};
@@ -1714,7 +1589,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
- .r_mib_stat64 = ksz9477_r_mib_stats64,
+ .r_mib_stat64 = ksz_r_mib_stats64,
.freeze_mib = ksz9477_freeze_mib,
.port_init_cnt = ksz9477_port_init_cnt,
.shutdown = ksz9477_reset_switch,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 0bd58467181f..7a2c8d4767af 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1586,10 +1586,6 @@
#define REG_PORT_LUE_MSTP_STATE 0x0B04
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
-
/* C - PTP */
#define REG_PTP_PORT_RX_DELAY__2 0x0C00
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8014b18d9391..10f127b09e58 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -20,6 +20,102 @@
#include "ksz_common.h"
+struct ksz_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+void ksz_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct rtnl_link_stats64 *stats;
+ struct ksz_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ raw = (struct ksz_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
+
+ /* HW counters are counting bytes + FCS which is not acceptable
+ * for rtnl_link_stats64 interface
+ */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ spin_unlock(&mib->stats64_lock);
+}
+EXPORT_SYMBOL_GPL(ksz_r_mib_stats64);
+
+void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+EXPORT_SYMBOL_GPL(ksz_get_stats64);
+
void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -372,6 +468,46 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL_GPL(ksz_enable_port);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state, int reg)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ u8 data;
+
+ ksz_pread8(dev, port, reg, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, reg, data);
+
+ p = &dev->ports[port];
+ p->stp_state = state;
+
+ ksz_update_port_member(dev, port);
+}
+EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);
+
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
struct dsa_switch *ds;
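
ksz_port_stp_state_set() above folds the two per-chip copies into a single
helper by taking the chip-specific STP control register as a parameter; the
bridge-state-to-bits mapping itself is unchanged. A small userspace sketch of
just that mapping, with the register access reduced to a variable (the bit
names mirror the driver, the rest is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PORT_TX_ENABLE      (1 << 2)
    #define PORT_RX_ENABLE      (1 << 1)
    #define PORT_LEARN_DISABLE  (1 << 0)

    enum br_state { BR_DISABLED, BR_LISTENING, BR_LEARNING,
                    BR_FORWARDING, BR_BLOCKING };

    static uint8_t stp_ctrl;    /* stand-in for the per-port register */

    static void stp_state_set(enum br_state state)
    {
        uint8_t data = stp_ctrl &
            ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);

        switch (state) {
        case BR_DISABLED:
        case BR_BLOCKING:
            data |= PORT_LEARN_DISABLE;
            break;
        case BR_LISTENING:
            data |= PORT_RX_ENABLE | PORT_LEARN_DISABLE;
            break;
        case BR_LEARNING:
            data |= PORT_RX_ENABLE;
            break;
        case BR_FORWARDING:
            data |= PORT_TX_ENABLE | PORT_RX_ENABLE;
            break;
        }
        stp_ctrl = data;
    }

    int main(void)
    {
        stp_state_set(BR_FORWARDING);
        printf("0x%02x\n", stp_ctrl);
        return 0;
    }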
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 485d4a948c38..28cda79b090f 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -151,6 +151,9 @@ int ksz9477_switch_register(struct ksz_device *dev);
void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
+void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s);
/* Common DSA access functions */
@@ -165,6 +168,8 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state, int reg);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
@@ -292,6 +297,11 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+/* STP State Defines */
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index fe3cb26f4287..2b02d823d497 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -24,6 +24,11 @@
#include "mt7530.h"
+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mt753x_pcs, pcs);
+}
+
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0x00, "TxDrop"),
@@ -2390,35 +2395,30 @@ mt7531_setup(struct dsa_switch *ds)
return 0;
}
-static bool
-mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
-
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ config->supported_interfaces);
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
}
static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
@@ -2426,42 +2426,35 @@ static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}
-static bool
-mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port))
- return phy_interface_mode_is_rgmii(state->interface);
+ if (mt7531_is_rgmii_port(priv, port)) {
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
fallthrough;
+
case 6: /* 1st cpu port supports sgmii/8023z only */
- if (state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface))
- return false;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_2500FD;
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
-}
-
-static bool
-mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->phy_mode_supported(ds, port, state);
}
static int
@@ -2534,30 +2527,11 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
return 0;
}
-static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
- unsigned long *supported)
-{
- /* Port5 supports ethier RGMII or SGMII.
- * Port6 supports SGMII only.
- */
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
- phylink_set(supported, 2500baseX_Full);
- phylink_set(supported, 2500baseT_Full);
- }
-}
-
-static void
-mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
unsigned int val;
/* For adjusting speed and duplex of SGMII force mode. */
@@ -2583,6 +2557,9 @@ mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
/* MT7531 SGMII 1G force mode can only work in full duplex mode,
* no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
+ *
+ * The speed check is unnecessary as the MAC capabilities apply
+ * this restriction. --rmk
*/
if ((speed == SPEED_10 || speed == SPEED_100) &&
duplex != DUPLEX_FULL)
@@ -2658,9 +2635,10 @@ static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
return 0;
}
-static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+static void mt7531_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 val;
/* Only restart AN when AN is enabled */
@@ -2717,6 +2695,24 @@ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return priv->info->mac_port_config(ds, port, mode, state->interface);
}
+static struct phylink_pcs *
+mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &priv->pcs[port].pcs;
+
+ default:
+ return NULL;
+ }
+}
+
static void
mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
@@ -2724,9 +2720,6 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct mt7530_priv *priv = ds->priv;
u32 mcr_cur, mcr_new;
- if (!mt753x_phy_mode_supported(ds, port, state))
- goto unsupported;
-
switch (port) {
case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII)
@@ -2781,17 +2774,6 @@ unsupported:
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void
-mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_an_restart)
- return;
-
- priv->info->mac_pcs_an_restart(ds, port);
-}
-
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -2801,16 +2783,13 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_link_up)
- return;
-
- priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+ if (pcs->ops->pcs_link_up)
+ pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
}
static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -2823,8 +2802,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
- mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
-
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
/* MT753x MAC works in 1G full duplex mode for all up-clocked
@@ -2904,81 +2881,51 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
return ret;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
+ interface, speed, DUPLEX_FULL);
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
speed, DUPLEX_FULL, true, true);
return 0;
}
-static void
-mt7530_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
-}
-
-static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7531_sgmii_validate(priv, port, supported);
-}
-
-static void
-mt753x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mt7530_priv *priv = ds->priv;
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !mt753x_phy_mode_supported(ds, port, state)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
- !phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, Autoneg);
- }
-
- /* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
- phylink_set(mask, 1000baseT_Full);
+ /* This switch only supports full-duplex at 1Gbps */
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- priv->info->mac_port_validate(ds, port, mask);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ priv->info->mac_port_get_caps(ds, port, config);
+}
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+static int mt753x_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* Autonegotiation is not supported in TRGMII nor 802.3z modes */
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ phylink_clear(supported, Autoneg);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ return 0;
}
-static int
-mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 pmsr;
- if (port < 0 || port >= MT7530_NUM_PORTS)
- return -EINVAL;
-
pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
state->link = (pmsr & PMSR_LINK);
@@ -3005,8 +2952,6 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
state->pause |= MLO_PAUSE_RX;
if (pmsr & PMSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static int
@@ -3048,33 +2993,59 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
return 0;
}
-static int
-mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
if (state->interface == PHY_INTERFACE_MODE_SGMII)
- return mt7531_sgmii_pcs_get_state_an(priv, port, state);
-
- return -EOPNOTSUPP;
+ mt7531_sgmii_pcs_get_state_an(priv, port, state);
+ else
+ state->link = false;
}
-static int
-mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
- return priv->info->mac_port_get_state(ds, port, state);
+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
+{
}
+static const struct phylink_pcs_ops mt7530_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7530_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7530_pcs_an_restart,
+};
+
+static const struct phylink_pcs_ops mt7531_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7531_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7531_pcs_an_restart,
+ .pcs_link_up = mt7531_pcs_link_up,
+};
+
static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int ret = priv->info->sw_setup(ds);
+ int i, ret;
+
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+ ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3145,10 +3116,9 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
- .phylink_validate = mt753x_phylink_validate,
- .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_get_caps = mt753x_phylink_get_caps,
+ .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
.phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
.phylink_mac_link_down = mt753x_phylink_mac_link_down,
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
@@ -3158,39 +3128,34 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
static const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7530] = {
.id = ID_MT7530,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7531] = {
.id = ID_MT7531,
+ .pcs_ops = &mt7531_pcs_ops,
.sw_setup = mt7531_setup,
.phy_read = mt7531_ind_phy_read,
.phy_write = mt7531_ind_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
- .phy_mode_supported = mt7531_phy_mode_supported,
- .mac_port_validate = mt7531_mac_port_validate,
- .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
- .mac_pcs_an_restart = mt7531_sgmii_restart_an,
- .mac_pcs_link_up = mt7531_sgmii_link_up_force,
},
};
@@ -3247,9 +3212,8 @@ mt7530_probe(struct mdio_device *mdiodev)
*/
if (!priv->info->sw_setup || !priv->info->pad_setup ||
!priv->info->phy_read || !priv->info->phy_write ||
- !priv->info->phy_mode_supported ||
- !priv->info->mac_port_validate ||
- !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ !priv->info->mac_port_get_caps ||
+ !priv->info->mac_port_config)
return -EINVAL;
priv->id = priv->info->id;
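
The phylink PCS conversion above embeds a struct phylink_pcs inside the
driver-private struct mt753x_pcs, and pcs_to_mt753x_pcs() recovers the outer
structure with container_of() so the PCS callbacks can reach the port number
and driver state. A standalone illustration of that embed-and-recover
pattern; the struct names echo the driver but are otherwise made up:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pcs { int dummy; };          /* stand-in for phylink_pcs */

    struct mt753x_pcs_like {
        struct pcs pcs;                 /* embedded, handed to callbacks */
        int port;
    };

    static int pcs_port(struct pcs *pcs)
    {
        /* Recover the wrapping structure from the embedded member. */
        return container_of(pcs, struct mt753x_pcs_like, pcs)->port;
    }

    int main(void)
    {
        struct mt753x_pcs_like p = { .port = 6 };

        printf("port %d\n", pcs_port(&p.pcs));
        return 0;
    }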
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 91508e2feef9..71e36b69b96d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -741,6 +741,12 @@ static const char *p5_intf_modes(unsigned int p5_interface)
struct mt7530_priv;
+struct mt753x_pcs {
+ struct phylink_pcs pcs;
+ struct mt7530_priv *priv;
+ int port;
+};
+
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
@@ -752,36 +758,27 @@ struct mt7530_priv;
* port
* @mac_port_validate: Holding the way to set addition validate type for a
* certan MAC port
- * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain
- * MAC port
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
- * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a
- * certain MAC port
- * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs
- * of the certain MAC port
*/
struct mt753x_info {
enum mt753x_id id;
+ const struct phylink_pcs_ops *pcs_ops;
+
int (*sw_setup)(struct dsa_switch *ds);
int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
- bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state);
+ void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_get_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
int (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
- void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
- void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -823,6 +820,7 @@ struct mt7530_priv {
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
+ struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
struct mutex reg_mutex;
int irq;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 64f4fdd02902..53fd12e7a21c 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -6276,6 +6276,32 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
return 0;
}
+static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip,
+ struct mdio_device *mdiodev)
+{
+ int err;
+
+ /* dual_chip takes precedence over single/multi-chip modes */
+ if (chip->info->dual_chip)
+ return -EINVAL;
+
+ /* If the MDIO address is 16, the first port address of switches such
+ * as the mv88e6*41, the device may be configured for single-chip
+ * addressing. Set up SMI access in single-chip addressing mode and try
+ * to detect the switch model; if detection fails, the device is not
+ * configured for single-chip addressing.
+ */
+ if (mdiodev->addr != 16)
+ return -EINVAL;
+
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_detect(chip);
+}
+
static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
{
struct mv88e6xxx_chip *chip;
@@ -6830,11 +6856,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
.vlan_msti_set = mv88e6xxx_vlan_msti_set,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
- .port_mdb_add = mv88e6xxx_port_mdb_add,
- .port_mdb_del = mv88e6xxx_port_mdb_del,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+ .port_mdb_add = mv88e6xxx_port_mdb_add,
+ .port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
.port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
@@ -6959,10 +6985,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->info = compat_info;
- err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
- if (err)
- goto out;
-
chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(chip->reset)) {
err = PTR_ERR(chip->reset);
@@ -6971,9 +6993,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (chip->reset)
usleep_range(1000, 2000);
- err = mv88e6xxx_detect(chip);
- if (err)
- goto out;
+ /* Detect if the device is configured in single chip addressing mode,
+ * otherwise continue with address specific smi init/detection.
+ */
+ err = mv88e6xxx_single_chip_detect(chip, mdiodev);
+ if (err) {
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ goto out;
+ }
if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED)
chip->tag_protocol = DSA_TAG_PROTO_EDSA;
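
The probe change above tries single-chip addressing first and only falls back
to the address-specific SMI init and detect path when that attempt fails. A
hedged sketch of that try-then-fall-back flow with stubbed helpers; the
helper names loosely mirror the driver, their bodies are placeholders:

    #include <stdio.h>

    /* Stubs standing in for the driver's SMI init + model detection. */
    static int single_chip_detect(int mdio_addr)
    {
        return mdio_addr == 16 ? 0 : -1;    /* only addr 16 may qualify */
    }

    static int addr_specific_detect(int mdio_addr)
    {
        return 0;                           /* assume this path succeeds */
    }

    static int probe(int mdio_addr)
    {
        /* Try single-chip addressing first ... */
        if (single_chip_detect(mdio_addr) == 0)
            return 0;
        /* ... and only then fall back to per-address init/detect. */
        return addr_specific_detect(mdio_addr);
    }

    int main(void)
    {
        printf("%d %d\n", probe(16), probe(4));
        return 0;
    }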
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 9e28219b223d..33cb124ca912 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1197,7 +1197,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->map = felix->info->map;
ocelot->stats_layout = felix->info->stats_layout;
- ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->vcap_pol.base = felix->info->vcap_pol_base;
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index f083b06fdfe9..39faf1027965 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -24,7 +24,6 @@ struct felix_info {
const u32 *port_modes;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
int num_ports;
int num_tx_queues;
struct vcap_props *vcap;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 52a8566071ed..081871824eaf 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -638,6 +638,7 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
{ .offset = 0x10F, .name = "drop_green_prio_5", },
{ .offset = 0x110, .name = "drop_green_prio_6", },
{ .offset = 0x111, .name = "drop_green_prio_7", },
+ OCELOT_STAT_END
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -2216,7 +2217,6 @@ static const struct felix_info felix_info_vsc9959 = {
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
.stats_layout = vsc9959_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
.vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 68ef8f111bbe..48fd43a93364 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -636,6 +636,7 @@ static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
{ .offset = 0x8F, .name = "drop_green_prio_5", },
{ .offset = 0x90, .name = "drop_green_prio_6", },
{ .offset = 0x91, .name = "drop_green_prio_7", },
+ OCELOT_STAT_END
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
@@ -1086,7 +1087,6 @@ static const struct felix_info seville_info_vsc9953 = {
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
.stats_layout = vsc9953_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
.vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
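
With OCELOT_STAT_END the stats layout tables above become sentinel-terminated,
which lets the common ocelot code walk them to the terminator instead of
carrying a separate num_stats field. A minimal sketch of counting such a
table; the sentinel definition used here (a NULL name) is an assumption for
the example, the real macro lives in the ocelot headers:

    #include <stdio.h>

    struct stat_layout {
        unsigned int offset;
        const char *name;
    };

    /* Assumed sentinel for this sketch: a NULL name ends the table. */
    #define STAT_END { .name = NULL }

    static const struct stat_layout stats[] = {
        { .offset = 0x10F, .name = "drop_green_prio_5" },
        { .offset = 0x110, .name = "drop_green_prio_6" },
        STAT_END
    };

    static unsigned int num_stats(const struct stat_layout *layout)
    {
        unsigned int n = 0;

        for (; layout->name; layout++)
            n++;
        return n;
    }

    int main(void)
    {
        printf("%u stats\n", num_stats(stats));
        return 0;
    }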
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index d3ed0a7f8077..2727d3169c25 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1287,87 +1287,71 @@ qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
if (ret >= 0)
return ret;
- return qca8k_mdio_read(priv, phy, regnum);
+ ret = qca8k_mdio_read(priv, phy, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
}
static int
-qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- /* Use mdio Ethernet when available, fallback to legacy one on error */
- ret = qca8k_phy_eth_command(priv, false, port, regnum, 0);
- if (!ret)
- return ret;
-
- return qca8k_mdio_write(priv, port, regnum, data);
+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
}
static int
-qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- /* Use mdio Ethernet when available, fallback to legacy one on error */
- ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
- if (ret >= 0)
- return ret;
-
- ret = qca8k_mdio_read(priv, port, regnum);
-
- if (ret < 0)
- return 0xffff;
-
- return ret;
+ return qca8k_internal_mdio_read(slave_bus, port, regnum);
}
static int
-qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
+qca8k_mdio_register(struct qca8k_priv *priv)
{
struct dsa_switch *ds = priv->ds;
+ struct device_node *mdio;
struct mii_bus *bus;
bus = devm_mdiobus_alloc(ds->dev);
-
if (!bus)
return -ENOMEM;
bus->priv = (void *)priv;
- bus->name = "qca8k slave mii";
- bus->read = qca8k_internal_mdio_read;
- bus->write = qca8k_internal_mdio_write;
- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
- ds->index);
-
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+ ds->dst->index, ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
-
ds->slave_mii_bus = bus;
- return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ /* Check if the devicetree declares the port:phy mapping */
+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ bus->name = "qca8k slave mii";
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
+ return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ }
+
+ /* If a mapping can't be found, the legacy mapping is used via the
+ * qca8k_port_to_phy function.
+ */
+ bus->name = "qca8k-legacy slave mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
+ return devm_mdiobus_register(priv->dev, bus);
}
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
- struct device_node *ports, *port, *mdio;
+ struct device_node *ports, *port;
phy_interface_t mode;
int err;
@@ -1429,24 +1413,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
QCA8K_MDIO_MASTER_EN);
}
- /* Check if the devicetree declare the port:phy mapping */
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
- err = qca8k_mdio_register(priv, mdio);
- if (err)
- of_node_put(mdio);
-
- return err;
- }
-
- /* If a mapping can't be found the legacy mapping is used,
- * using the qca8k_port_to_phy function
- */
- priv->legacy_phy_port_mapping = true;
- priv->ops.phy_read = qca8k_phy_read;
- priv->ops.phy_write = qca8k_phy_write;
-
- return 0;
+ return qca8k_mdio_register(priv);
}
static int
@@ -2346,7 +2313,7 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 1);
- priv->port_sts[port].enabled = 1;
+ priv->port_enabled_map |= BIT(port);
if (dsa_is_user_port(ds, port))
phy_support_asym_pause(phy);
@@ -2360,23 +2327,25 @@ qca8k_port_disable(struct dsa_switch *ds, int port)
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 0);
- priv->port_sts[port].enabled = 0;
+ priv->port_enabled_map &= ~BIT(port);
}
static int
qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct qca8k_priv *priv = ds->priv;
- int i, mtu = 0;
- priv->port_mtu[port] = new_mtu;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[i] > mtu)
- mtu = priv->port_mtu[i];
+ /* We only have a global MTU setting.
+ * DSA always sets the CPU port's MTU to the largest MTU of the slave
+ * ports, so setting the MTU on the CPU port alone is sufficient to
+ * program a correct value for every port.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
/* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
+ return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
}
static int
@@ -3033,16 +3002,6 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
-
- /* Set initial MTU for every port.
- * We have only have a general MTU setting. So track
- * every port and set the max across all port.
- * Set per port MTU to 1500 as the MTU change function
- * will add the overhead and if its set to 1518 then it
- * will apply the overhead again and we will end up with
- * MTU of 1536 instead of 1518
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -3202,8 +3161,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
- priv->ops = qca8k_switch_ops;
- priv->ds->ops = &priv->ops;
+ priv->ds->ops = &qca8k_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
@@ -3243,13 +3201,16 @@ static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
- int i;
+ int port;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (!priv->port_sts[i].enabled)
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Do not enable on resume if the port was
+ * disabled before.
+ */
+ if (!(priv->port_enabled_map & BIT(port)))
continue;
- qca8k_port_set_status(priv, i, enable);
+ qca8k_port_set_status(priv, port, enable);
}
}
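
The qca8k changes above collapse the per-port "enabled" bookkeeping from an
array of structs into a single u8 bitmap: BIT(port) is set on enable, cleared
on disable, and tested before re-enabling ports across suspend/resume. A
standalone sketch of that bitmap handling (the names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)      (1U << (n))
    #define NUM_PORTS   7

    static uint8_t port_enabled_map;

    static void port_enable(int port)  { port_enabled_map |= BIT(port); }
    static void port_disable(int port) { port_enabled_map &= ~BIT(port); }

    static void resume_ports(void)
    {
        for (int port = 0; port < NUM_PORTS; port++) {
            /* Skip ports that were disabled before suspend. */
            if (!(port_enabled_map & BIT(port)))
                continue;
            printf("re-enabling port %d\n", port);
        }
    }

    int main(void)
    {
        port_enable(0);
        port_enable(3);
        port_disable(0);
        resume_ports();
        return 0;
    }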
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index f375627174c8..04408e11402a 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -324,10 +324,6 @@ enum qca8k_mid_cmd {
QCA8K_MIB_CAST = 3,
};
-struct ar8xxx_port_status {
- int enabled;
-};
-
struct qca8k_match_data {
u8 id;
bool reduced_package;
@@ -388,17 +384,17 @@ struct qca8k_priv {
u8 mirror_rx;
u8 mirror_tx;
u8 lag_hash_mode;
- bool legacy_phy_port_mapping;
+ /* Each bit corresponds to a port. This switch supports a maximum of 7
+ * ports; a set bit means the port is enabled, a cleared bit disabled.
+ */
+ u8 port_enabled_map;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
- struct dsa_switch_ops ops;
struct gpio_desc *reset_gpio;
- unsigned int port_mtu[QCA8K_NUM_PORTS];
struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
struct qca8k_mgmt_eth_data mgmt_eth_data;
struct qca8k_mib_eth_data mib_eth_data;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b33841c6507a..72b6fc1932b5 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2252,14 +2252,13 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
* change it through the dynamic interface later.
*/
for (i = 0; i < ds->num_ports; i++) {
- u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1);
-
speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
mac[i].speed);
mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
if (priv->xpcs[i])
- bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr);
+ bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
+ MDIO_MMD_VEND2, MDIO_CTRL1);
}
/* No PTP operations can run right now */
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 1111d1f33865..557ca8ff9dec 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if ((master_dev->flags & IFF_UP) == IFF_UP) {
/* slave is not a master & not already a slave: */
if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
equalizer_t *eql = netdev_priv(master_dev);
int ret;
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof(*s));
s->dev = slave_dev;
s->priority = srq.priority;
s->priority_bps = srq.priority;
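
The eql hunk above swaps kmalloc() followed by memset() for kzalloc(), which
allocates and zeroes in one call. A short sketch of the idiom; the structure
and the alloc_slave() wrapper are hypothetical, kzalloc()/GFP_KERNEL are the
real API:

    #include <linux/slab.h>

    struct slave_like {
        int priority;
        /* ... */
    };

    static struct slave_like *alloc_slave(void)
    {
        /* kzalloc() returns zeroed memory, so no memset() is needed. */
        return kzalloc(sizeof(struct slave_like), GFP_KERNEL);
    }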
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 3add305d34b4..4eecbdfff3ff 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -265,8 +265,6 @@
#define SLIC_NUM_STAT_DESC_ARRAYS 4
#define SLIC_INVALID_STAT_DESC_IDX 0xffffffff
-#define SLIC_NAPI_WEIGHT 64
-
#define SLIC_UPR_LSTAT 0
#define SLIC_UPR_CONFIG 1
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 1fc9a1cd3ef8..ce353b0c02a3 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto unmap;
}
- netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
+ netif_napi_add(dev, &sdev->napi, slic_poll, NAPI_POLL_WEIGHT);
netif_carrier_off(dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 07444aead3fd..6a356a6cee15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -31,8 +31,6 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
-#define ENA_NAPI_BUDGET 64
-
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
@@ -2270,7 +2268,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
netif_napi_add(adapter->netdev,
&napi->napi,
ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
- ENA_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
if (!ENA_IS_XDP_INDEX(adapter, i)) {
napi->rx_ring = &adapter->rx_ring[i];
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 52b9833fda99..7e9c74b141ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -40,6 +40,7 @@
#define AQ_CFG_RX_HDR_SIZE 256U
#define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
@@ -64,8 +65,6 @@
*/
#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
-#define AQ_CFG_NAPI_WEIGHT 64U
-
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a418238f6309..1daecd483b8d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
"%sQueue[%d] AllocFails",
"%sQueue[%d] SkbAllocFails",
"%sQueue[%d] Polls",
+ "%sQueue[%d] PageFlips",
+ "%sQueue[%d] PageReuses",
+ "%sQueue[%d] PageFrees",
+ "%sQueue[%d] XdpAbort",
+ "%sQueue[%d] XdpDrop",
+ "%sQueue[%d] XdpPass",
+ "%sQueue[%d] XdpTx",
+ "%sQueue[%d] XdpInvalid",
+ "%sQueue[%d] XdpRedirect",
};
static const char * const aq_ethtool_queue_tx_stat_names[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e65ce7199dac..88595863d8bc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -14,17 +14,22 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
+#include "aq_vec.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
static const struct net_device_ops aq_ndev_ops;
@@ -126,9 +131,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
int err;
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+ netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+ new_mtu);
+ return -EOPNOTSUPP;
+ }
+
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
@@ -204,6 +219,25 @@ err_exit:
return err;
}
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ (features & NETIF_F_LRO)) {
+ netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -410,6 +444,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
mqprio->qopt.prio_tc_map);
}
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(ndev);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && ndev->features & NETIF_F_LRO) {
+ netdev_err(ndev,
+ "LRO is not supported with single buffer XDP, disabling\n");
+ ndev->features &= ~NETIF_F_LRO;
+ }
+ }
+
+ need_update = !!aq_nic->xdp_prog != !!prog;
+ if (running && need_update)
+ aq_ndev_close(ndev);
+
+ old_prog = xchg(&aq_nic->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (!old_prog && prog)
+ static_branch_inc(&aq_xdp_locking_key);
+ else if (old_prog && !prog)
+ static_branch_dec(&aq_xdp_locking_key);
+
+ if (running && need_update)
+ return aq_ndev_open(ndev);
+
+ return 0;
+}
+
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -418,10 +502,13 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_fix_features = aq_ndev_fix_features,
.ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
+ .ndo_bpf = aq_xdp,
+ .ndo_xdp_xmit = aq_xdp_xmit,
};
static int __init aq_ndev_init_module(void)
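aq_xdp_setup() follows the usual XDP control-plane pattern: quiesce the datapath only when a program appears or disappears, atomically swap the program pointer, release the old reference, and bump or drop a static branch that steers the RX path. A reduced user-space model of that bookkeeping, a sketch only, with C11 atomics standing in for xchg() and the static key and all names illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct prog { int id; };

    static _Atomic(struct prog *) xdp_prog;     /* models aq_nic->xdp_prog */
    static atomic_int xdp_locking_key;          /* models the static branch */

    static void put_prog(struct prog *p)
    {
            (void)p;        /* kernel: bpf_prog_put(); nothing to free here */
    }

    static int xdp_setup(struct prog *prog, bool running)
    {
            struct prog *old = atomic_load(&xdp_prog);
            bool need_update = (!!old != !!prog);

            if (running && need_update)
                    printf("close datapath\n");             /* aq_ndev_close() */

            old = atomic_exchange(&xdp_prog, prog);         /* xchg() */
            if (old)
                    put_prog(old);

            if (!old && prog)
                    atomic_fetch_add(&xdp_locking_key, 1);  /* static_branch_inc() */
            else if (old && !prog)
                    atomic_fetch_sub(&xdp_locking_key, 1);  /* static_branch_dec() */

            if (running && need_update)
                    printf("reopen datapath\n");            /* aq_ndev_open() */
            return 0;
    }

    int main(void)
    {
            struct prog p = { .id = 1 };

            xdp_setup(&p, true);    /* attach */
            xdp_setup(NULL, true);  /* detach */
            return 0;
    }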
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce73..99870865f66d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -12,6 +12,8 @@
#include "aq_common.h"
#include "aq_nic.h"
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 24d715c28a35..e11cc29d3264 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -569,6 +569,103 @@ err_exit:
return err;
}
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+ struct xdp_frame *xdpf,
+ struct aq_ring_s *ring)
+{
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff;
+ struct skb_shared_info *sinfo;
+ unsigned int frag_count = 0U;
+ unsigned int nr_frags = 0U;
+ unsigned int ret = 0U;
+ u16 total_len;
+
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ total_len = xdpf->len;
+ dx_buff->len = total_len;
+ if (xdp_frame_has_frags(xdpf)) {
+ nr_frags = sinfo->nr_frags;
+ total_len += sinfo->xdp_frags_size;
+ }
+ dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+ goto exit;
+
+ first = dx_buff;
+ dx_buff->len_pkt = total_len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ for (; nr_frags--; ++frag_count) {
+ skb_frag_t *frag = &sinfo->frags[frag_count];
+ unsigned int frag_len = skb_frag_size(frag);
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
+ buff_size, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = NULL;
+ dx_buff->xdpf = xdpf;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->pa)
+ continue;
+ if (unlikely(dx_buff->is_sop))
+ dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+
+exit:
+ return ret;
+}
+
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
@@ -697,6 +794,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
+ dx_buff->xdpf = NULL;
goto exit;
mapping_error:
@@ -725,6 +823,44 @@ exit:
return ret;
}
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
+ struct net_device *ndev = aq_nic_get_ndev(aq_nic);
+ struct skb_shared_info *sinfo;
+ int cpu = smp_processor_id();
+ int err = NETDEV_TX_BUSY;
+ struct netdev_queue *nq;
+ unsigned int frags = 1;
+
+ if (xdp_frame_has_frags(xdpf)) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ frags += sinfo->nr_frags;
+ }
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX)
+ return err;
+
+ nq = netdev_get_tx_queue(ndev, tx_ring->idx);
+ __netif_tx_lock(nq, cpu);
+
+ aq_ring_update_queue_state(tx_ring);
+
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+ goto out;
+
+ frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+ if (likely(frags))
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+ frags);
+out:
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
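aq_nic_map_xdp() above walks each frag of the xdp_frame and, since one TX descriptor carries at most AQ_CFG_TX_FRAME_MAX bytes, splits larger frags across several descriptors. The chunking loop in isolation, as a hedged sketch with the descriptor and DMA bookkeeping elided and the 16 KiB limit assumed rather than taken from aq_cfg.h:

    #include <stdio.h>

    #define TX_FRAME_MAX 16384u     /* stands in for AQ_CFG_TX_FRAME_MAX */

    /* Return how many descriptors one frag of frag_len bytes consumes,
     * printing the per-descriptor offset/length split as it goes.
     */
    static unsigned int map_frag(unsigned int frag_len)
    {
            unsigned int offset = 0, descs = 0;

            while (frag_len) {
                    unsigned int chunk = frag_len > TX_FRAME_MAX ? TX_FRAME_MAX : frag_len;

                    printf("desc %u: offset=%u len=%u\n", descs, offset, chunk);
                    frag_len -= chunk;
                    offset += chunk;
                    descs++;
            }
            return descs;
    }

    int main(void)
    {
            printf("total descriptors: %u\n", map_frag(40000));
            return 0;
    }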
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 1a7148041e3d..935ba889bd9a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -11,6 +11,8 @@
#define AQ_NIC_H
#include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/bpf.h>
#include "aq_common.h"
#include "aq_rss.h"
@@ -128,6 +130,7 @@ struct aq_nic_s {
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
+ struct bpf_prog *xdp_prog;
struct net_device *ndev;
unsigned int aq_vecs;
unsigned int packet_filter;
@@ -177,6 +180,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 06de19f63287..275324c9e51e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -1218,7 +1218,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
atomic_set(&aq_ptp->offset_ingress, 0);
netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
- aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
+ aq_ptp_poll, NAPI_POLL_WEIGHT);
aq_ptp->idx_vector = idx_vec;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 77e76c9efd32..ea740210803f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -7,15 +7,37 @@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
-#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
@@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
rxpage->page = NULL;
}
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
- struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
@@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
- rxpage->pg_off = 0;
+ rxpage->pg_off = rx_ring->page_offset;
return 0;
@@ -58,21 +81,26 @@ err_exit:
return ret;
}
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
- int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
/* One means ring is the only user and can reuse */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
- rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
- if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
- (PAGE_SIZE << order)) {
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
+
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
@@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
- rxbuf->rxdata.pg_off = 0;
+ rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
@@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
}
if (!rxbuf->rxdata.page) {
- ret = aq_get_rxpage(&rxbuf->rxdata, order,
- aq_nic_get_dev(self->aq_nic));
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
@@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
@@ -172,11 +200,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
- self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
-
- if (aq_nic_cfg->rxpageorder > self->page_order)
- self->page_order = aq_nic_cfg->rxpageorder;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
+ }
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@ -298,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop && buff->skb)) {
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
-
dev_kfree_skb_any(buff->skb);
- buff->skb = NULL;
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
}
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@ -339,11 +389,159 @@ static void aq_rx_checksum(struct aq_ring_s *self,
__skb_incr_checksum_unnecessary(skb);
}
-#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-int aq_ring_rx_clean(struct aq_ring_s *self,
- struct napi_struct *napi,
- int *work_done,
- int budget)
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ goto pass;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ /* abort: multi-buffer packet, but the XDP program does not support frags */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+pass:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
+
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_frag_off_set(frag, buff_->rxdata.pg_off);
+ skb_frag_size_set(frag, buff_->len);
+ sinfo->xdp_frags_size += buff_->len;
+ __skb_frag_set_page(frag, buff_->rxdata.page);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
bool is_rsc_completed = true;
@@ -449,7 +647,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_add_rx_frag(skb, 0, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff->rxdata.page);
}
@@ -469,7 +667,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
@@ -510,6 +708,149 @@ err_exit:
return err;
}
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to 0 queue */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
+
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
@@ -529,7 +870,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
@@ -543,9 +883,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
- buff->len = AQ_CFG_RX_FRAME_MAX;
+ buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order);
+ err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
@@ -600,6 +940,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
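With XDP enabled, the RX page-reuse logic in aq_get_rxpages() has to budget for headroom and tailroom: each buffer occupies page_offset + frame_max + tail_size bytes, and the ring only flips within a page while another full slot still fits. A quick arithmetic sketch of how many buffers fit per page under the two configurations; the concrete sizes (4 KiB pages, 2 KiB frames, ~256 B headroom, ~320 B skb_shared_info tail) are stated assumptions, not values read from the driver:

    #include <stdio.h>

    static unsigned int buffers_per_page(unsigned int page_size, unsigned int order,
                                         unsigned int page_offset, unsigned int frame_max,
                                         unsigned int tail_size)
    {
            unsigned int pg_off = page_offset;
            unsigned int n = 0;

            /* mirror the reuse check: flip while another frame (+tail) still fits */
            while (pg_off + frame_max + tail_size <= (page_size << order)) {
                    n++;
                    pg_off += frame_max + page_offset + tail_size;
            }
            return n;
    }

    int main(void)
    {
            /* non-XDP: order-0 page, no head/tail room */
            printf("plain rx: %u buffers per page\n",
                   buffers_per_page(4096, 0, 0, 2048, 0));
            /* XDP: order-2 pages, headroom and skb_shared_info tailroom reserved */
            printf("xdp rx:   %u buffers per page\n",
                   buffers_per_page(4096, 2, 256, 2048, 320));
            return 0;
    }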
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 93659e58f1ce..0a6c34438c1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -11,6 +11,10 @@
#define AQ_RING_H
#include "aq_common.h"
+#include "aq_vec.h"
+
+#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
struct page;
struct aq_nic_cfg_s;
@@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s {
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
+ struct xdp_frame *xdpf;
};
/* TxC */
struct {
@@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s {
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct aq_ring_stats_tx_s {
@@ -132,10 +143,15 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
- unsigned int page_order;
+ u16 page_order;
+ u16 page_offset;
+ u16 frame_max;
+ u16 tail_size;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
+ struct bpf_prog *xdp_prog;
enum atl_ring_type ring_type;
+ struct xdp_rxq_info xdp_rxq;
};
struct aq_ring_param_s {
@@ -175,6 +191,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
+
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
@@ -182,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 6ab1f3212d24..f0fdf20f01c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -10,11 +10,6 @@
*/
#include "aq_vec.h"
-#include "aq_nic.h"
-#include "aq_ring.h"
-#include "aq_hw.h"
-
-#include <linux/netdevice.h>
struct aq_vec_s {
const struct aq_hw_ops *aq_hw_ops;
@@ -125,7 +120,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
self->rx_rings = 0;
netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
- aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+ aq_vec_poll, NAPI_POLL_WEIGHT);
err_exit:
return self;
@@ -153,9 +148,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+ if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
idx_ring, aq_nic_cfg);
if (!ring) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
err = -ENOMEM;
goto err_exit;
}
@@ -300,8 +309,10 @@ void aq_vec_ring_free(struct aq_vec_s *self)
for (i = 0U; self->tx_rings > i; ++i) {
ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- if (i < self->rx_rings)
+ if (i < self->rx_rings) {
+ xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
}
self->tx_rings = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 567f3d4b79a2..78fac609b71d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -13,7 +13,13 @@
#define AQ_VEC_H
#include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
#include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
struct aq_hw_s;
struct aq_hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4625ccb79499..9dfd68f0fda9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759..878a53abec33 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -969,15 +969,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
buff->len =
- rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
if (buff->is_lro) {
/* LRO */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 7b525c65bacb..2dfc1e32bbb3 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1527,7 +1527,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, NAPI_POLL_WEIGHT);
err = bgmac_phy_connect(bgmac);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 110088e662ea..e05ac92c0650 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -364,8 +364,6 @@
#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040
#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080
-#define BGMAC_WEIGHT 64
-
#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5caa75b41b73..4e9215bce4ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2212,7 +2212,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2381,7 +2381,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2493,7 +2493,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2529,7 +2529,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -6218,7 +6218,7 @@
#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
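The one-character bnx2x change has a concrete reason: 0x1 is a signed int, and shifting a bit into its sign position is undefined behaviour in standard C (and a UBSan finding), while 0x1U keeps the shift in unsigned arithmetic where it is fully defined. A compact illustration; in practice GCC gives the signed shift a predictable result, so this is about conformance and warnings rather than an observed miscompile:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* 0x1 << 31 would shift a bit into the sign position of a signed int:
             * undefined behaviour, and a typical UBSan report reads "left shift
             * of 1 by 31 places cannot be represented in type 'int'".  The
             * unsigned literal below keeps the expression well defined.
             */
            uint32_t scpad_parity = 0x1U << 31;

            printf("AEU SCPAD parity bit: 0x%08x\n", (unsigned int)scpad_parity);
            return 0;
    }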
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1d69fe0737a1..082518e68579 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -56,6 +56,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
+#include <linux/align.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -738,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
- *mapping += bp->rx_dma_offset;
return page;
}
@@ -780,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (!page)
return -ENOMEM;
+ mapping += bp->rx_dma_offset;
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
} else {
@@ -840,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- page = rxr->rx_page;
- if (!page) {
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+ if (!page)
+ return -ENOMEM;
+
+ } else {
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
- rxr->rx_page = page;
- rxr->rx_page_offset = 0;
}
- offset = rxr->rx_page_offset;
- rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
- if (rxr->rx_page_offset == PAGE_SIZE)
- rxr->rx_page = NULL;
- else
- get_page(page);
- } else {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- }
- mapping = dma_map_page_attrs(&pdev->dev, page, offset,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- __free_page(page);
- return -EIO;
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
}
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -962,6 +971,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int len = offset_and_len & 0xffff;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+ bp->rx_dma_offset);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bp->rx_dma_offset);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 cons, void *data, u8 *data_ptr,
@@ -984,7 +1026,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -995,6 +1036,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
+ skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1038,22 +1080,24 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct skb_shared_info *shinfo,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ u32 i, total_frag_len = 0;
bool p5_tpa = false;
- u32 i;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
+ skb_frag_t *frag = &shinfo->frags[i];
u16 cons, frag_len;
struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1069,8 +1113,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
+ skb_frag_off_set(frag, cons_rx_buf->offset);
+ skb_frag_size_set(frag, frag_len);
+ __skb_frag_set_page(frag, cons_rx_buf->page);
+ shinfo->nr_frags = i + 1;
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1081,16 +1127,14 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
+ if (xdp && page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
- struct skb_shared_info *shinfo;
unsigned int nr_frags;
- shinfo = skb_shinfo(skb);
nr_frags = --shinfo->nr_frags;
__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
- dev_kfree_skb(skb);
-
cons_rx_buf->page = page;
/* Update prod since possibly some pages have been
@@ -1098,23 +1142,62 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
*/
rxr->rx_agg_prod = prod;
bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
- return NULL;
+ return 0;
}
dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- DMA_FROM_DEVICE,
+ bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- skb->data_len += frag_len;
- skb->len += frag_len;
- skb->truesize += PAGE_SIZE;
-
+ total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 total_frag_len = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
+ agg_bufs, tpa, NULL);
+ if (!total_frag_len) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb->data_len += total_frag_len;
+ skb->len += total_frag_len;
+ skb->truesize += PAGE_SIZE * agg_bufs;
return skb;
}
+static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 total_frag_len = 0;
+
+ if (!xdp_buff_has_frags(xdp))
+ shinfo->nr_frags = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
+ idx, agg_bufs, tpa, xdp);
+ if (total_frag_len) {
+ xdp_buff_set_frags_flag(xdp);
+ shinfo->nr_frags = agg_bufs;
+ shinfo->xdp_frags_size = total_frag_len;
+ }
+ return total_frag_len;
+}
+
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 agg_bufs, u32 *raw_cons)
{
@@ -1644,7 +1727,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1729,8 +1812,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
+ bool xdp_active = false;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 flags, misc;
void *data;
int rc = 0;
@@ -1839,18 +1924,39 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;
- if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
- rc = 1;
- goto next_rx;
+ if (bnxt_xdp_attached(bp, rxr)) {
+ bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+ if (agg_bufs) {
+ u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ cp_cons, agg_bufs,
+ false);
+ if (!frag_len) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+ xdp_active = true;
+ }
+
+ if (xdp_active) {
+ if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
}
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
- if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
- agg_bufs, false);
+ if (agg_bufs) {
+ if (!xdp_active)
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
@@ -1872,11 +1978,22 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ if (!skb) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
}
}
@@ -2492,10 +2609,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- if (bnapi->events & BNXT_AGG_EVENT)
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
+ if (bnapi->events & BNXT_AGG_EVENT) {
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ }
bnapi->events = 0;
}
@@ -2876,14 +2996,23 @@ skip_rx_buf_free:
if (!page)
continue;
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- rx_agg_buf->page = NULL;
- __clear_bit(i, rxr->rx_agg_bmap);
+ page_pool_recycle_direct(rxr->page_pool, page);
+ } else {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
+ __free_page(page);
+ }
}
skip_rx_agg_free:
@@ -3797,7 +3926,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* 8 for CRC and VLAN */
rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
- rx_space = rx_size + NET_SKB_PAD +
+ rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
@@ -3838,9 +3967,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
}
bp->rx_agg_ring_size = agg_ring_size;
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
- rx_space = rx_size + NET_SKB_PAD +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ rx_space = BNXT_PAGE_MODE_BUF_SIZE;
+ rx_size = BNXT_MAX_PAGE_MODE_MTU;
+ } else {
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
}
bp->rx_buf_use_size = rx_size;
@@ -3881,14 +4016,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
- if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
- return -EOPNOTSUPP;
- bp->dev->max_mtu =
- min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
- bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ bp->rx_skb_func = bnxt_rx_multi_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+ } else {
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+ }
bp->rx_dir = DMA_BIDIRECTIONAL;
- bp->rx_skb_func = bnxt_rx_page_skb;
/* Disable LRO or GRO_HW */
netdev_update_features(bp->dev);
} else {
@@ -5230,12 +5372,15 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
- req->enables =
- cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
- VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+
+ if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ }
/* thresholds not implemented in firmware yet */
req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
@@ -11035,6 +11180,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ if (!(bp->flags & BNXT_FLAG_TPA))
+ features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
if (!(features & NETIF_F_GRO))
features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 98453a78cbd0..a498ee297946 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -591,10 +591,12 @@ struct nqe_cn {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
#define BNXT_MAX_MTU 9500
-#define BNXT_MAX_PAGE_MODE_MTU \
+#define BNXT_PAGE_MODE_BUF_SIZE \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
- XDP_PACKET_HEADROOM - \
- SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
+ XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU \
+ (BNXT_PAGE_MODE_BUF_SIZE - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
#define BNXT_MIN_PKT_SIZE 52
@@ -699,13 +701,12 @@ struct bnxt_sw_tx_bd {
};
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
u8 is_gso;
u8 is_push;
u8 action;
- union {
- unsigned short nr_frags;
- u16 rx_prod;
- };
+ unsigned short nr_frags;
+ u16 rx_prod;
};
struct bnxt_sw_rx_bd {
@@ -1817,6 +1818,7 @@ struct bnxt {
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
+#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
#define BNXT_CHIP_SR2(bp) \
((bp)->chip_num == CHIP_NUM_58818)
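The reworked bnxt macros carve one page into XDP headroom, the receive buffer (BNXT_PAGE_MODE_BUF_SIZE) and the skb_shared_info tail, with BNXT_MAX_PAGE_MODE_MTU falling out by subtraction. A stand-alone recomputation under typical x86_64 values; the constants below are assumptions for illustration, not taken from the kernel headers:

    #include <stdio.h>

    #define PAGE_SIZE               4096u
    #define VLAN_ETH_HLEN           18u     /* Ethernet header + VLAN tag */
    #define NET_IP_ALIGN            2u      /* 2 by default, 0 on x86 */
    #define XDP_PACKET_HEADROOM     256u
    #define SHARED_INFO_ALIGNED     320u    /* ~SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

    int main(void)
    {
            unsigned int buf_size = PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -
                                    XDP_PACKET_HEADROOM;            /* BNXT_PAGE_MODE_BUF_SIZE */
            unsigned int max_mtu  = buf_size - SHARED_INFO_ALIGNED; /* BNXT_MAX_PAGE_MODE_MTU */

            printf("page-mode buffer size: %u bytes\n", buf_size);
            printf("max page-mode MTU:     %u bytes\n", max_mtu);
            return 0;
    }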
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 22e965e18fbc..b3a48d6675fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -3491,7 +3491,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
dev_kfree_skb(skb);
return -EIO;
}
- bnxt_xmit_bd(bp, txr, map, pkt_size);
+ bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
/* Sync BD data before updating doorbell */
wmb();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 03b1d6c04504..f02fe906dedb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -24,36 +24,91 @@ DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len)
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp)
{
- struct bnxt_sw_tx_bd *tx_buf;
+ struct skb_shared_info *sinfo;
+ struct bnxt_sw_tx_bd *tx_buf, *first_buf;
struct tx_bd *txbd;
+ int num_frags = 0;
u32 flags;
u16 prod;
+ int i;
+
+ if (xdp && xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ num_frags = sinfo->nr_frags;
+ }
+ /* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
+ first_buf = tx_buf;
+ tx_buf->nr_frags = num_frags;
+ if (xdp)
+ tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
- TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ /* now fill the following buffers with the frags */
+ for (i = 0; i < num_frags ; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct bnxt_sw_tx_bd *frag_tx_buf;
+ struct pci_dev *pdev = bp->pdev;
+ dma_addr_t frag_mapping;
+ int frag_len;
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
+ /* fill up the next buffer with this frag */
+ frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf->page = skb_frag_page(frag);
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ frag_len = skb_frag_size(frag);
+ frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+ frag_len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+ return NULL;
+
+ dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+
+ flags = frag_len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+ len = frag_len;
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+ /* Sync TX BD */
+ wmb();
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- return tx_buf;
+
+ return first_buf;
}
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+ dma_addr_t mapping, u32 len, u16 rx_prod,
+ struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
tx_buf->rx_prod = rx_prod;
tx_buf->action = XDP_TX;
+
}
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
@@ -63,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
@@ -78,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i;
+ int i, j, frags;
for (i = 0; i < nr_pkts; i++) {
tx_buf = &txr->tx_buf_ring[tx_cons];
@@ -96,6 +151,13 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
} else if (tx_buf->action == XDP_TX) {
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
+
+ frags = tx_buf->nr_frags;
+ for (j = 0; j < frags; j++) {
+ tx_cons = NEXT_TX(tx_cons);
+ tx_buf = &txr->tx_buf_ring[tx_cons];
+ page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+ }
}
tx_cons = NEXT_TX(tx_cons);
}
@@ -103,7 +165,52 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
if (rx_doorbell_needed) {
tx_buf = &txr->tx_buf_ring[last_tx_cons];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+
+ }
+}
+
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+ return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp)
+{
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct pci_dev *pdev;
+ dma_addr_t mapping;
+ u32 offset;
+
+ pdev = bp->pdev;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+ xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+ xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+}
+
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *shinfo;
+ int i;
+
+ if (!xdp || !xdp_buff_has_frags(xdp))
+ return;
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+ page_pool_recycle_direct(rxr->page_pool, page);
}
+ shinfo->nr_frags = 0;
}
/* returns the following:
@@ -111,14 +218,14 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+ struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
- struct xdp_buff xdp;
dma_addr_t mapping;
+ u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
@@ -128,16 +235,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
pdev = bp->pdev;
- rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
txr = rxr->bnapi->tx_ring;
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
- xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -150,26 +251,38 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event &= ~BNXT_RX_EVENT;
*len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
+ if (orig_data != xdp.data)
offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
- }
+
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
- if (tx_avail < 1) {
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ *event = 0;
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+
+ tx_needed += sinfo->nr_frags;
+ *event = BNXT_AGG_EVENT;
+ }
+
+ if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- *event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
+
+ *event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod));
+ NEXT_RX(rxr->rx_prod), &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -177,6 +290,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
@@ -184,6 +299,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -203,6 +319,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -270,8 +387,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
int tx_xdp = 0, rc, tc;
struct bpf_prog *old;
- if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
- netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ if (prog && !prog->aux->xdp_has_frags &&
+ bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
@@ -337,3 +455,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
return rc;
}
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+ struct page_pool *pool, struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!skb)
+ return NULL;
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ }
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ PAGE_SIZE * sinfo->nr_frags,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+ return skb;
+}
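The XDP_TX changes above hinge on knowing, up front, how many TX descriptors a multi-buffer frame will consume. A minimal, driver-agnostic sketch of that pattern (the helper name is made up for illustration; it is not part of the patch):

#include <net/xdp.h>

/* One descriptor for the head buffer plus one per fragment. */
static unsigned int xdp_tx_descs_needed(struct xdp_buff *xdp)
{
	unsigned int descs = 1;

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		descs += sinfo->nr_frags;
	}
	return descs;
}

This is the same tx_needed computation bnxt_rx_xdp() performs before deciding whether enough TX ring space is available.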
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 067bb5e821f5..505911ae095d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -14,13 +14,25 @@ DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len);
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff xdp, struct page *page, unsigned int *len,
u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp);
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+ u8 num_frags, struct page_pool *pool,
+ struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1);
#endif
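The relaxed MTU check in bnxt_xdp_set() follows a pattern other multi-buffer-capable drivers use as well. A hedged sketch with an invented function and parameter name (only prog->aux->xdp_has_frags and the error code come from the patch):

#include <linux/bpf.h>
#include <linux/netdevice.h>

static int example_xdp_mtu_check(struct net_device *dev,
				 struct bpf_prog *prog,
				 unsigned int max_single_buf_mtu)
{
	/* Programs loaded without BPF_F_XDP_HAS_FRAGS only ever see the
	 * head buffer, so the MTU must still fit in one page-mode buffer.
	 */
	if (prog && !prog->aux->xdp_has_frags &&
	    dev->mtu > max_single_buf_mtu)
		return -EOPNOTSUPP;

	return 0;
}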
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index a1a38456c9a3..5d5f10180158 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2534,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev)
int err;
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- BUG_ON(!res);
+ if (!res) {
+ printk(KERN_ERR "%s: failed to get resource\n",
+ dev_name(&pldev->dev));
+ err = -EINVAL;
+ goto out_out;
+ }
sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
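Replacing the BUG_ON() with a normal error return is the important part of the hunk above; newer probe() code often folds the lookup and mapping into one managed call. A sketch of that alternative (illustrative only — the sb1250 driver itself keeps the explicit platform_get_resource()/ioremap() pair):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Looks up IORESOURCE_MEM 0 and maps it; on failure this returns
	 * an ERR_PTR that we propagate instead of crashing on a BUG_ON().
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... continue with probe ... */
	return 0;
}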
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f1d2c4cd5da2..f6fe08df568b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1881,7 +1881,6 @@ poll_exit:
return rcvd;
}
-#define BNAD_NAPI_POLL_QUOTA 64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
@@ -1892,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id)
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ bnad_napi_poll_rx, NAPI_POLL_WEIGHT);
}
}
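This hunk and the gemini, be2net and gianfar hunks below all drop a private 64-packet weight macro in favour of the core default. A minimal sketch of the resulting registration (device, handler and function names are placeholders):

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to 'budget' received packets ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static void example_register_napi(struct net_device *dev,
				  struct napi_struct *napi)
{
	/* NAPI_POLL_WEIGHT is 64, the same value every removed macro held */
	netif_napi_add(dev, napi, example_poll, NAPI_POLL_WEIGHT);
}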
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e475be29845c..6434e74c04f1 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -337,11 +337,9 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -391,11 +389,9 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -2753,9 +2749,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
- err = pm_runtime_get_sync(&bp->pdev->dev);
+ err = pm_runtime_resume_and_get(&bp->pdev->dev);
if (err < 0)
- goto pm_exit;
+ return err;
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -4142,11 +4138,9 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
- ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&lp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&lp->pdev->dev);
+ if (ret < 0)
return ret;
- }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
@@ -4594,7 +4588,7 @@ static int zynqmp_init(struct platform_device *pdev)
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
/* Ensure PS-GTR PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_get(&pdev->dev, "sgmii-phy");
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
if (IS_ERR(bp->sgmii_phy)) {
ret = PTR_ERR(bp->sgmii_phy);
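The macb conversions above all collapse the same three-line error dance into one helper. A minimal sketch of the pattern (the function name is illustrative):

#include <linux/pm_runtime.h>

static int example_runtime_op(struct device *dev)
{
	int ret;

	/* pm_runtime_resume_and_get() drops the usage count itself on
	 * failure, which is what the removed pm_runtime_put_noidle()
	 * calls did by hand after pm_runtime_get_sync().
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access the hardware ... */

	pm_runtime_put(dev);
	return 0;
}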
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 457cb7121000..1281d1565ef8 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
- * This function implements the the reception process.
+ * This function implements the reception process.
* Also it runs the TX completion thread
*/
static int xgmac_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 59683f79959c..60b648b46f75 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -483,7 +483,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
tx_info->ip_family = AF_INET;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 9e2378013642..41714203ace8 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -567,7 +567,7 @@ void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+ size_t len, int flags, int *addr_len);
int chtls_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index c320cc8ca68d..539992dad8ba 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1426,7 +1426,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied)
}
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
@@ -1441,7 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
@@ -1616,7 +1616,7 @@ skip_copy:
* Peek at data in a socket's receive buffer.
*/
static int peekmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags)
+ size_t len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 peek_seq, offset;
@@ -1626,7 +1626,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
long timeo;
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
peek_seq = tp->copied_seq;
do {
@@ -1737,7 +1737,7 @@ found_ok_skb:
}
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
@@ -1750,25 +1750,23 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
if (unlikely(flags & MSG_OOB))
- return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
- addr_len);
+ return tcp_prot.recvmsg(sk, msg, len, flags, addr_len);
if (unlikely(flags & MSG_PEEK))
- return peekmsg(sk, msg, len, nonblock, flags);
+ return peekmsg(sk, msg, len, flags);
if (sk_can_busy_loop(sk) &&
skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
- sk_busy_loop(sk, nonblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
if (is_tls_rx(csk))
- return chtls_pt_recvmsg(sk, msg, len, nonblock,
- flags, addr_len);
+ return chtls_pt_recvmsg(sk, msg, len, flags, addr_len);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
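With the separate nonblock argument removed from ->recvmsg(), non-blocking behaviour is derived from MSG_DONTWAIT in flags, which is exactly what sock_rcvtimeo() expects. A one-function sketch of the convention (illustrative, not chtls code):

#include <net/sock.h>

static long example_recv_timeout(struct sock *sk, int flags)
{
	/* Returns 0 (do not block) when the caller passed MSG_DONTWAIT,
	 * otherwise the socket's configured SO_RCVTIMEO value.
	 */
	return sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
}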
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 8014eb33937c..9e6de2f968fa 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -68,7 +68,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define DEFAULT_GMAC_RXQ_ORDER 9
#define DEFAULT_GMAC_TXQ_ORDER 8
#define DEFAULT_RX_BUF_ORDER 11
-#define DEFAULT_NAPI_WEIGHT 64
#define TX_MAX_FRAGS 16
#define TX_QUEUE_NUM 1 /* max: 6 */
#define RX_MAX_ALLOC_ORDER 2
@@ -2472,8 +2471,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll,
- DEFAULT_NAPI_WEIGHT);
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll, NAPI_POLL_WEIGHT);
ret = of_get_mac_address(np, mac);
if (!ret) {
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 86b1d23eba83..1db19463fd46 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -474,8 +474,6 @@ err_out_netdev:
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) ioread32(ee_addr)
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8689d4a51fe5..61fe9625bed1 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -101,8 +101,7 @@
#define MAX_ROCE_EQS 5
#define MAX_MSIX_VECTORS 32
#define MIN_MSIX_VECTORS 1
-#define BE_NAPI_WEIGHT 64
-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
+#define MAX_RX_POST NAPI_POLL_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define MAX_NUM_POST_ERX_DB 255u
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d0c262f2695a..5939068a8f62 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2983,7 +2983,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
}
return 0;
}
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 904f3304727e..49c93aa38862 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1091,8 +1091,7 @@ static int tsnep_mdio_init(struct tsnep_adapter *adapter)
retval = of_mdiobus_register(adapter->mdiobus, np);
out:
- if (np)
- of_node_put(np);
+ of_node_put(np);
return retval;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 206b7a35eaf5..f0b652a65043 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3232,7 +3232,7 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+ gfar_poll_rx_sq, NAPI_POLL_WEIGHT);
netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx_sq, 2);
}
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index ca5e14f908fe..68b59d3202e3 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -52,9 +52,6 @@ struct ethtool_rx_list {
unsigned int count;
};
-/* The maximum number of packets to be handled in one call of gfar_poll */
-#define GFAR_DEV_WEIGHT 64
-
/* Length for FCB */
#define GMAC_FCB_LEN 8
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
index a849b3c6b01f..d50c222948b4 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
@@ -6,13 +6,7 @@
static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
- int err;
-
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
-
- return 0;
+ return devlink_info_driver_name_put(req, KBUILD_MODNAME);
}
static const struct devlink_ops fun_dl_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index b668df6193be..8c7fadf2b734 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg {
struct hclge_pf_to_vf_msg {
u16 code;
- u16 vf_mbx_msg_code;
- u16 vf_mbx_msg_subcode;
- u16 resp_status;
- u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ union {
+ /* used for mbx response */
+ struct {
+ u16 vf_mbx_msg_code;
+ u16 vf_mbx_msg_subcode;
+ u16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ };
+ /* used for general mbx */
+ struct {
+ u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ };
};
struct hclge_mbx_vf_to_pf_cmd {
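The union rework above lets the same fixed-size message area be filled either field-by-field (mailbox response) or as one raw blob (general mailbox message) without growing the structure. A small standalone sketch of the idea, with made-up type and size names rather than the HNS3 ones:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_MSG_SIZE 16			/* stand-in for HCLGE_MBX_MAX_MSG_SIZE */

struct example_pf_to_vf_msg {
	uint16_t code;
	union {
		struct {			/* response view */
			uint16_t req_code;
			uint16_t req_subcode;
			uint16_t resp_status;
			uint8_t  resp_data[EXAMPLE_MSG_SIZE - 6];
		};
		uint8_t msg_data[EXAMPLE_MSG_SIZE];	/* raw view */
	};
};

int main(void)
{
	struct example_pf_to_vf_msg m = { .code = 1 };
	const uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* General message: copy straight into the raw view, as
	 * hclge_send_mbx_msg() now does with msg.msg_data.
	 */
	memcpy(m.msg_data, payload, sizeof(payload));

	printf("sizeof(msg) = %zu, first payload byte = 0x%02x\n",
	       sizeof(m), m.msg_data[0]);
	return 0;
}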
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 79c64f4e67d2..8a3a446219f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
+ HNAE3_DEV_SUPPORT_CQ_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+#define hnae3_ae_dev_cq_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c15ca710dabb..c8b151d29f53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static void
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 876650eddac4..7a7d4cf9bf35 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_PAUSE_B = 14,
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+ HCLGE_COMM_CAP_CQ_B = 18,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a3ee7875d6a7..ae56306400b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
priv->tqp_vector[i].rx_group.dim.mode = mode;
}
- /* only device version above V3(include V3), GL can switch CQ/EQ
- * period mode.
- */
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ if (hnae3_ae_dev_cq_supported(ae_dev)) {
u32 new_mode;
u64 reg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index f4da77452126..1db8a86f046d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -664,6 +664,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->tx_pending = priv->ring[0].desc_num;
param->rx_pending = priv->ring[rx_queue_index].desc_num;
kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+ kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+ &priv->state);
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -1104,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
+static bool
+hns3_is_ringparam_changed(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct hns3_ring_param *old_ringparam,
+ struct hns3_ring_param *new_ringparam)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ old_ringparam->tx_desc_num = priv->ring[0].desc_num;
+ old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
+ old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
+ new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
+
+ if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
+ old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
+ old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
+ netdev_info(ndev, "ringparam not changed\n");
+ return false;
+ }
+
+ return true;
+}
+
static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -1120,62 +1152,80 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
return 0;
}
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+ return -EOPNOTSUPP;
+
+ if (tx_push == old_state)
+ return 0;
+
+ netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+ old_state ? "on" : "off", tx_push ? "on" : "off");
+
+ if (tx_push)
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+ else
+ clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ return 0;
+}
+
static int hns3_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
+ struct hns3_ring_param old_ringparam, new_ringparam;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_ring *tmp_rings;
bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- u16 queue_num = h->kinfo.num_tqps;
- u32 old_rx_buf_len;
int ret, i;
ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
- /* Hardware requires that its descriptors must be multiple of eight */
- new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
- new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring[0].desc_num;
- old_rx_desc_num = priv->ring[queue_num].desc_num;
- old_rx_buf_len = priv->ring[queue_num].buf_size;
- if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num &&
- kernel_param->rx_buf_len == old_rx_buf_len)
+ ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+ if (ret)
+ return ret;
+
+ if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
+ &old_ringparam, &new_ringparam))
return 0;
tmp_rings = hns3_backup_ringparam(priv);
if (!tmp_rings) {
- netdev_err(ndev,
- "backup ring param failed by allocating memory fail\n");
+ netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
return -ENOMEM;
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n",
- old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num,
- old_rx_buf_len, kernel_param->rx_buf_len);
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
+ old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
+ new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
+ old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
- hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
+ new_ringparam.rx_desc_num);
+ hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret);
- hns3_change_rx_buf_len(ndev, old_rx_buf_len);
- hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
+ old_ringparam.rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
@@ -1385,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
return 0;
}
-static int hns3_check_coalesce_para(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
+static int
+hns3_check_cqe_coalesce_param(struct net_device *netdev,
+ struct kernel_ethtool_coalesce *kernel_coal)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
+ !hnae3_ae_dev_cq_supported(ae_dev)) {
+ netdev_err(netdev, "coalesced cqe mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
int ret;
+ ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
+ if (ret)
+ return ret;
+
ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
@@ -1464,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
if (hns3_nic_resetting(netdev))
return -EBUSY;
- ret = hns3_check_coalesce_para(netdev, cmd);
+ ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
if (ret)
return ret;
@@ -1825,23 +1897,27 @@ static int hns3_set_tunable(struct net_device *netdev,
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
+ netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
+ old_tx_spare_buf_size, new_tx_spare_buf_size);
ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
if (ret ||
(!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
int ret1;
- netdev_warn(netdev,
- "change tx spare buf size fail, revert to old value\n");
+ netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
ret1 = hns3_set_tx_spare_buf_size(netdev,
old_tx_spare_buf_size);
if (ret1) {
- netdev_err(netdev,
- "revert to old tx spare buf size fail\n");
+ netdev_err(netdev, "revert to old tx spare buf size fail\n");
return ret1;
}
return ret;
}
+
+ netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+ priv->ring->tx_spare->len);
+
break;
default:
ret = -EOPNOTSUPP;
@@ -1858,7 +1934,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
-#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN
+#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \
+ ETHTOOL_RING_USE_TX_PUSH)
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
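The tx_push hunks above ride on generic ethtool plumbing: the driver advertises ETHTOOL_RING_USE_TX_PUSH and then reads and writes kernel_ethtool_ringparam->tx_push in its ring-parameter callbacks. A minimal sketch with placeholder driver state (the capability test and names are invented):

#include <linux/ethtool.h>
#include <linux/errno.h>

struct example_priv {
	bool tx_push_enabled;
	bool tx_push_capable;
};

static void example_get_tx_push(struct example_priv *priv,
				struct kernel_ethtool_ringparam *kparam)
{
	kparam->tx_push = priv->tx_push_enabled;
}

static int example_set_tx_push(struct example_priv *priv, u32 tx_push)
{
	if (tx_push && !priv->tx_push_capable)
		return -EOPNOTSUPP;	/* hardware has no TX push doorbell */

	priv->tx_push_enabled = !!tx_push;
	return 0;
}

/* In the driver's ethtool_ops:
 *	.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
 *				 ETHTOOL_RING_USE_TX_PUSH,
 */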
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
index 822d6fcbc73b..da207d1d9aa9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
u8 link_ext_substate;
};
+struct hns3_ring_param {
+ u32 tx_desc_num;
+ u32 rx_desc_num;
+ u32 rx_buf_len;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 42a9e73d8588..6efd768cc07c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures
*
* This function handles all the PF RAS errors in the
- * hw register/s using command.
+ * hw registers using command.
*/
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8cebb180c812..a5dd2c8c244a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater then VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set pf mtu for less than vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7998ca617a92..49c40744cda5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -109,7 +109,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
resp_pf_to_vf->msg_len = msg_len;
resp_pf_to_vf->msg.code = mbx_opcode;
- memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 342d7cdf6285..e13d71abd9f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* ensure vf tbl list as empty before init*/
+ /* ensure vf tbl list as empty before init */
ret = hclgevf_clear_vport_list(hdev);
if (ret) {
dev_err(&pdev->dev,
@@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGEVF_TQP_REG_SIZE * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index d5e0a3f762f7..c8055d69255c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
- * to prtect the received_response from race condition
+ * to protect the received_response from race condition
*/
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
* message to PF.
* @hdev: pointer to struct hclgevf_dev
- * @resp_msg: pointer to store the original message type and response status
- * @len: the resp_msg data array length.
+ * @code0: the message opcode the VF sends to the PF.
+ * @code1: the message sub-opcode the VF sends to the PF.
+ * @resp_data: pointer to store response data from PF to VF.
+ * @resp_len: the length of resp_data from PF to VF.
*/
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5c5931dba51d..3699c5435396 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -257,12 +257,14 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
+ u64 prev = 0;
int rc;
if (!reuse_ltb(ltb, size)) {
dev_dbg(dev,
"LTB size changed from 0x%llx to 0x%x, reallocating\n",
ltb->size, size);
+ prev = ltb->size;
free_long_term_buff(adapter, ltb);
}
@@ -283,8 +285,8 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
bitmap_set(adapter->map_ids, ltb->map_id, 1);
dev_dbg(dev,
- "Allocated new LTB [map %d, size 0x%llx]\n",
- ltb->map_id, ltb->size);
+ "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
+ ltb->map_id, ltb->size, prev);
}
/* Ensure ltb is zeroed - specially when reusing it. */
@@ -345,6 +347,208 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = 0;
}
+/**
+ * free_ltb_set - free the given set of long term buffers (LTBs)
+ * @adapter: The ibmvnic adapter containing this ltb set
+ * @ltb_set: The ltb_set to be freed
+ *
+ * Free each LTB in the given set, then free the set's ltbs array.
+ */
+
+static void free_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set)
+{
+ int i;
+
+ for (i = 0; i < ltb_set->num_ltbs; i++)
+ free_long_term_buff(adapter, &ltb_set->ltbs[i]);
+
+ kfree(ltb_set->ltbs);
+ ltb_set->ltbs = NULL;
+ ltb_set->num_ltbs = 0;
+}
+
+/**
+ * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb_set: container object for the set of LTBs
+ * @num_buffs: Number of buffers in the LTB set
+ * @buff_size: Size of each buffer in the LTB set
+ *
+ * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
+ * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
+ * the new set needs fewer LTBs than the old set, free the excess LTBs. If
+ * the new set needs more LTBs than the old set, allocate the remaining
+ * ones. Try to reuse as many LTBs as possible and avoid reallocation.
+ *
+ * Any changes to this allocation strategy must be reflected in
+ * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
+ */
+static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set, int num_buffs,
+ int buff_size)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ltb_set old_set;
+ struct ibmvnic_ltb_set new_set;
+ int rem_size;
+ int tot_size; /* size of all ltbs */
+ int ltb_size; /* size of one ltb */
+ int nltbs;
+ int rc;
+ int n;
+ int i;
+
+ dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
+ buff_size);
+
+ ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
+ tot_size = num_buffs * buff_size;
+
+ if (ltb_size > tot_size)
+ ltb_size = tot_size;
+
+ nltbs = tot_size / ltb_size;
+ if (tot_size % ltb_size)
+ nltbs++;
+
+ old_set = *ltb_set;
+
+ if (old_set.num_ltbs == nltbs) {
+ new_set = old_set;
+ } else {
+ int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
+
+ new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
+ if (!new_set.ltbs)
+ return -ENOMEM;
+
+ new_set.num_ltbs = nltbs;
+
+ /* Free any excess ltbs in old set */
+ for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
+ free_long_term_buff(adapter, &old_set.ltbs[i]);
+
+ /* Copy remaining ltbs to new set. All LTBs except the
+ * last one are of the same size. alloc_long_term_buff()
+ * will realloc if the size changes.
+ */
+ n = min(old_set.num_ltbs, new_set.num_ltbs);
+ for (i = 0; i < n; i++)
+ new_set.ltbs[i] = old_set.ltbs[i];
+
+		/* Any additional entries in the new set are zeroed for
+		 * now; their LTBs will be allocated in alloc_long_term_buff().
+		 */
+
+		/* We no longer need the old_set, so free it. Note that we
+		 * may have reused some ltbs from the old set and freed the
+		 * excess ltbs above, so we only need to free the container
+		 * now, not the LTBs themselves (i.e. don't free_ltb_set()!).
+ */
+ kfree(old_set.ltbs);
+ old_set.ltbs = NULL;
+ old_set.num_ltbs = 0;
+
+ /* Install the new set. If allocations fail below, we will
+ * retry later and know what size LTBs we need.
+ */
+ *ltb_set = new_set;
+ }
+
+ i = 0;
+ rem_size = tot_size;
+ while (rem_size) {
+ if (ltb_size > rem_size)
+ ltb_size = rem_size;
+
+ rem_size -= ltb_size;
+
+ rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
+ if (rc)
+ goto out;
+ i++;
+ }
+
+ WARN_ON(i != new_set.num_ltbs);
+
+ return 0;
+out:
+	/* We may have allocated one or more LTBs before failing and we
+	 * want to try to reuse them on the next reset, so don't free the
+	 * ltb set.
+	 */
+ return rc;
+}
+
+/**
+ * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
+ * @rxpool: The receive buffer pool containing buffer
+ * @bufidx: Index of buffer in rxpool
+ * @ltbp: (Output) pointer to the long term buffer containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
+ * pool and its corresponding offset. For now, assume each LTB may be of
+ * a different size; this lookup could be optimized based on the
+ * allocation strategy in alloc_ltb_set().
+ */
+static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON(bufidx >= rxpool->size);
+
+ for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
+ ltb = &rxpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / rxpool->buff_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * rxpool->buff_size;
+}
+
+/**
+ * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
+ * @txpool: The transmit buffer pool containing buffer
+ * @bufidx: Index of buffer in txpool
+ * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [txpool, bufidx] to an LTB in the
+ * pool and its corresponding offset.
+ */
+static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON_ONCE(bufidx >= txpool->num_buffers);
+
+ for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
+ ltb = &txpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / txpool->buf_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * txpool->buf_size;
+}
+
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -361,6 +565,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_sub_crq_queue *rx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -369,7 +574,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
dma_addr_t dma_addr;
unsigned char *dst;
int shift = 0;
- int index;
+ int bufidx;
int i;
if (!pool->active)
@@ -385,14 +590,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- index = pool->free_map[pool->next_free];
+ bufidx = pool->free_map[pool->next_free];
/* We maybe reusing the skb from earlier resets. Allocate
* only if necessary. But since the LTB may have changed
* during reset (see init_rx_pools()), update LTB below
* even if reusing skb.
*/
- skb = pool->rx_buff[index].skb;
+ skb = pool->rx_buff[bufidx].skb;
if (!skb) {
skb = netdev_alloc_skb(adapter->netdev,
pool->buff_size);
@@ -407,26 +612,26 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
- offset = index * pool->buff_size;
- dst = pool->long_term_buff.buff + offset;
+ map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
+ dst = ltb->buff + offset;
memset(dst, 0, pool->buff_size);
- dma_addr = pool->long_term_buff.addr + offset;
+ dma_addr = ltb->addr + offset;
/* add the skb to an rx_buff in the pool */
- pool->rx_buff[index].data = dst;
- pool->rx_buff[index].dma = dma_addr;
- pool->rx_buff[index].skb = skb;
- pool->rx_buff[index].pool_index = pool->index;
- pool->rx_buff[index].size = pool->buff_size;
+ pool->rx_buff[bufidx].data = dst;
+ pool->rx_buff[bufidx].dma = dma_addr;
+ pool->rx_buff[bufidx].skb = skb;
+ pool->rx_buff[bufidx].pool_index = pool->index;
+ pool->rx_buff[bufidx].size = pool->buff_size;
/* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
sub_crq->rx_add.correlator =
- cpu_to_be64((u64)&pool->rx_buff[index]);
+ cpu_to_be64((u64)&pool->rx_buff[bufidx]);
sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.map_id = ltb->map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -466,10 +671,10 @@ failure:
sub_crq = &ind_bufp->indir_arr[i];
rx_buff = (struct ibmvnic_rx_buff *)
be64_to_cpu(sub_crq->rx_add.correlator);
- index = (int)(rx_buff - pool->rx_buff);
- pool->free_map[pool->next_free] = index;
- dev_kfree_skb_any(pool->rx_buff[index].skb);
- pool->rx_buff[index].skb = NULL;
+ bufidx = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = bufidx;
+ dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+ pool->rx_buff[bufidx].skb = NULL;
}
adapter->replenish_add_buff_failure += ind_bufp->index;
atomic_add(buffers_added, &pool->available);
@@ -579,7 +784,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(rx_pool->free_map);
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ free_ltb_set(adapter, &rx_pool->ltb_set);
if (!rx_pool->rx_buff)
continue;
@@ -724,8 +929,8 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size);
+ rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
+ rx_pool->size, rx_pool->buff_size);
if (rc)
goto out;
@@ -782,7 +987,7 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
{
kfree(tx_pool->tx_buff);
kfree(tx_pool->free_map);
- free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ free_ltb_set(adapter, &tx_pool->ltb_set);
}
/**
@@ -972,17 +1177,16 @@ update_ltb:
for (i = 0; i < num_pools; i++) {
struct ibmvnic_tx_pool *tso_pool;
struct ibmvnic_tx_pool *tx_pool;
- u32 ltb_size;
tx_pool = &adapter->tx_pool[i];
- ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
- i, tx_pool->long_term_buff.buff,
- tx_pool->num_buffers, tx_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
+ i, tx_pool->num_buffers, tx_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
+ tx_pool->num_buffers, tx_pool->buf_size);
+ if (rc)
+ goto out;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
@@ -991,14 +1195,14 @@ update_ltb:
tx_pool->free_map[j] = j;
tso_pool = &adapter->tso_pool[i];
- ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
- i, tso_pool->long_term_buff.buff,
- tso_pool->num_buffers, tso_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
+ i, tso_pool->num_buffers, tso_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
+ tso_pool->num_buffers, tso_pool->buf_size);
+ if (rc)
+ goto out;
tso_pool->consumer_index = 0;
tso_pool->producer_index = 0;
@@ -1911,6 +2115,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -1926,7 +2131,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- int index = 0;
+ int bufidx = 0;
u8 proto = 0;
/* If a reset is in progress, drop the packet since
@@ -1960,9 +2165,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
else
tx_pool = &adapter->tx_pool[queue_num];
- index = tx_pool->free_map[tx_pool->consumer_index];
+ bufidx = tx_pool->free_map[tx_pool->consumer_index];
- if (index == IBMVNIC_INVALID_MAP) {
+ if (bufidx == IBMVNIC_INVALID_MAP) {
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
@@ -1973,10 +2178,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
- offset = index * tx_pool->buf_size;
- dst = tx_pool->long_term_buff.buff + offset;
+ map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);
+
+ dst = ltb->buff + offset;
memset(dst, 0, tx_pool->buf_size);
- data_dma_addr = tx_pool->long_term_buff.addr + offset;
+ data_dma_addr = ltb->addr + offset;
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -2003,9 +2209,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
- tx_buff = &tx_pool->tx_buff[index];
+ tx_buff = &tx_pool->tx_buff[bufidx];
tx_buff->skb = skb;
- tx_buff->index = index;
+ tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
memset(&tx_crq, 0, sizeof(tx_crq));
@@ -2017,10 +2223,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_is_gso(skb))
tx_crq.v1.correlator =
- cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+ cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
else
- tx_crq.v1.correlator = cpu_to_be32(index);
- tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+ tx_crq.v1.correlator = cpu_to_be32(bufidx);
+ tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
@@ -3972,16 +4178,16 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->desired.rx_entries =
adapter->max_rx_add_entries_per_subcrq;
- max_entries = IBMVNIC_MAX_LTB_SIZE /
+ max_entries = IBMVNIC_LTB_SET_SIZE /
(adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.tx_entries = max_entries;
}
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.rx_entries = max_entries;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1310c861bf83..e5c6ff3d0c47 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -36,9 +36,50 @@
#define IBMVNIC_TSO_BUFS 64
#define IBMVNIC_TSO_POOL_MASK 0x80000000
-#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
-#define IBMVNIC_BUFFER_HLEN 500
+/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx pool
+ * has a set of buffers. The size of each buffer is determined by the MTU.
+ *
+ * Each Rx/Tx pool is also associated with a DMA region that is shared
+ * with the "hardware" (VIOS) and used to send/receive packets. The DMA
+ * region is also referred to as a Long Term Buffer or LTB.
+ *
+ * The size of the DMA region required for an Rx/Tx pool depends on the
+ * number and size (MTU) of the buffers in the pool. At the max levels
+ * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
+ * some padding.
+ *
+ * But the size of a single DMA region is limited by MAX_ORDER in the
+ * kernel (about 16MB currently). To support, say, 4K jumbo frames, we
+ * use a set of LTBs (struct ltb_set) per pool.
+ *
+ * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel
+ * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
+ * (must be <= IBMVNIC_ONE_LTB_MAX)
+ * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
+ *
+ * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
+ * have a fixed size of IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS = 4MB.
+ *
+ * The Rx and Tx pools can have up to 4096 buffers. The max size of these
+ * buffers is about 9588 bytes (for jumbo frames, including
+ * IBMVNIC_BUFFER_HLEN), so we set IBMVNIC_LTB_SET_SIZE for a pool to
+ * 4096 * 9588 ~= 38MB.
+ *
+ * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
+ * the allocation of the LTB can fail when the system is low on memory.
+ * If it is too small, we would need several mappings for each of the
+ * Rx/Tx/TSO pools, but there is a limit of 255 mappings per vnic in the
+ * VNIC protocol.
+ *
+ * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
+ * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
+ * pool for its 4MB. Thus the 16 Rx and 16 Tx queues require 32 * 5 = 160
+ * plus 16 for the TSO pools, for a total of 176 LTB mappings per VNIC.
+ */
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
+#define IBMVNIC_LTB_SET_SIZE (38 << 20)
+#define IBMVNIC_BUFFER_HLEN 500
#define IBMVNIC_RESET_DELAY 100
struct ibmvnic_login_buffer {
@@ -793,6 +834,11 @@ struct ibmvnic_long_term_buff {
u8 map_id;
};
+struct ibmvnic_ltb_set {
+ int num_ltbs;
+ struct ibmvnic_long_term_buff *ltbs;
+};
+
struct ibmvnic_tx_buff {
struct sk_buff *skb;
int index;
@@ -805,7 +851,7 @@ struct ibmvnic_tx_pool {
int *free_map;
int consumer_index;
int producer_index;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
int num_buffers;
int buf_size;
} ____cacheline_aligned;
@@ -828,7 +874,7 @@ struct ibmvnic_rx_pool {
int next_free;
int next_alloc;
int active;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;
struct ibmvnic_vpd {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 55c6bce5da61..18558a019353 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -852,6 +852,7 @@ struct i40e_vsi {
u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_buf_failed;
u64 rx_page_failed;
u64 rx_page_reuse;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6aefffd83615..2819e261a126 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -47,6 +47,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_SFP_X722_A:
hw->mac.type = I40E_MAC_X722;
break;
default:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index be7c6f34d45c..c9dcd6d92c83 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -309,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->stats.bytes,
tx_ring->tx_stats.restart_queue);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
i,
tx_ring->tx_stats.tx_busy,
- tx_ring->tx_stats.tx_done_old);
+ tx_ring->tx_stats.tx_done_old,
+ tx_ring->tx_stats.tx_stopped);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i\n",
i, tx_ring->size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 1bcb0ec0f0c0..2610338002fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -33,6 +33,7 @@
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_SFP_X722_A 0x0DDA
#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e48499624d22..610f00cbaff9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -293,12 +293,14 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
I40E_VSI_STAT("tx_busy", tx_busy),
+ I40E_VSI_STAT("tx_stopped", tx_stopped),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
+ I40E_VSI_STAT("tx_restart", tx_restart),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6778df2177a1..358c2edc118d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -77,6 +77,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@ -785,6 +786,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
unsigned int start;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -804,6 +806,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_stopped = 0;
rx_page = 0;
rx_buf = 0;
rx_reuse = 0;
@@ -828,6 +831,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_stopped += p->tx_stats.tx_stopped;
/* locate Rx ring */
p = READ_ONCE(vsi->rx_rings[q]);
@@ -872,6 +876,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_stopped = tx_stopped;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
vsi->rx_page_reuse = rx_reuse;
@@ -13436,8 +13441,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np->vsi = vsi;
hw_enc_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_SOFT_FEATURES |
NETIF_F_TSO |
@@ -13468,6 +13472,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ I40E_GSO_PARTIAL_FEATURES;
+
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+ netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
/* enable macvlan offloads */
netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0eae5858f2fe..7bc1174edf6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3,6 +3,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
@@ -3015,6 +3016,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
{
struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
+ __be16 protocol;
union {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -3026,7 +3028,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
- u16 gso_segs, gso_size;
+ u16 gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3039,15 +3041,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
ip.v4->tot_len = 0;
ip.v4->check = 0;
+
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
} else {
ip.v6->payload_len = 0;
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
}
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
@@ -3100,10 +3110,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
- gso_segs = skb_shinfo(skb)->gso_segs;
/* update GSO size and bytecount with header size */
- first->gso_segs = gso_segs;
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */
@@ -3187,13 +3196,27 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *exthdr;
u32 offset, cmd = 0;
__be16 frag_off;
+ __be16 protocol;
u8 l4_proto = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
+ /* Set the tx_flags to indicate the IP protocol type; this is
+ * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ else
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
/* compute outer L2 header size */
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
@@ -3373,6 +3396,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
/* Memory barrier before checking head and tail */
smp_mb();
+ ++tx_ring->tx_stats.tx_stopped;
+
/* Check again in a case another CPU has just made room available. */
if (likely(I40E_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3749,7 +3774,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
struct i40e_tx_buffer *first;
u32 td_offset = 0;
u32 tx_flags = 0;
- __be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso, count;
@@ -3791,15 +3815,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
- /* obtain protocol of skb */
- protocol = vlan_get_protocol(skb);
-
- /* setup IPv4/IPv6 offloads */
- if (protocol == htons(ETH_P_IP))
- tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
- tx_flags |= I40E_TX_FLAGS_IPV6;
-
tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index c471c2da313c..41f86e9535a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -290,6 +290,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
int prev_pkt_ctr;
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 19da3b22160f..8c5118c8baaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -20,6 +20,7 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_CONSUMED BIT(0)
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
+#define I40E_XDP_EXIT BIT(3)
/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index c1d25b0b0ca2..af3e7e6afc85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -161,9 +161,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return I40E_XDP_REDIR;
+ if (!err)
+ return I40E_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = I40E_XDP_EXIT;
+ else
+ result = I40E_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -175,16 +179,16 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (result == I40E_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = I40E_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = I40E_XDP_CONSUMED;
- break;
}
return result;
}
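
For illustration only (not part of the patch): the new redirect error handling, isolated as a standalone sketch. When the XSK pool has need_wakeup enabled, an -ENOBUFS return from xdp_do_redirect() typically means user space has not yet replenished the AF_XDP rings, so the driver now leaves the NAPI loop early (the new I40E_XDP_EXIT) instead of dropping frame after frame; any other error still drops the frame. The enum and helper below are hypothetical names, not driver code.

#include <errno.h>
#include <stdbool.h>

enum zc_verdict { ZC_CONSUMED, ZC_REDIR, ZC_EXIT };

/* Map an xdp_do_redirect()-style return value to a driver verdict. */
static enum zc_verdict classify_redirect_err(int err, bool pool_uses_need_wakeup)
{
	if (!err)
		return ZC_REDIR;	/* frame handed off successfully */
	if (pool_uses_need_wakeup && err == -ENOBUFS)
		return ZC_EXIT;		/* no room: stop the NAPI loop early */
	return ZC_CONSUMED;		/* any other error: drop the frame */
}
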
@@ -271,7 +275,8 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
unsigned int *rx_packets,
unsigned int *rx_bytes,
unsigned int size,
- unsigned int xdp_res)
+ unsigned int xdp_res,
+ bool *failure)
{
struct sk_buff *skb;
@@ -281,11 +286,15 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
return;
+ if (xdp_res == I40E_XDP_EXIT) {
+ *failure = true;
+ return;
+ }
+
if (xdp_res == I40E_XDP_CONSUMED) {
xsk_buff_free(xdp_buff);
return;
}
-
if (xdp_res == I40E_XDP_PASS) {
/* NB! We are not checking for errors using
* i40e_test_staterr with
@@ -371,7 +380,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, bi);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
- &rx_bytes, size, xdp_res);
+ &rx_bytes, size, xdp_res, &failure);
+ if (failure)
+ break;
total_rx_packets += rx_packets;
total_rx_bytes += rx_bytes;
xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
@@ -382,7 +393,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
- failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -594,13 +605,13 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return -ENETDOWN;
if (!i40e_enabled_xdp_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
if (queue_id >= vsi->num_queue_pairs)
- return -ENXIO;
+ return -EINVAL;
if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
ring = vsi->xdp_rings[queue_id];
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 9a0a358a15c2..6d8beb84d852 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3336,7 +3336,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
- netdev->mpls_features = NETIF_F_HW_CSUM;
+ netdev->mpls_features = NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
/* enable features */
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 25b8f6f726eb..496250f9f8fc 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -30,12 +30,46 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+enum {
+ ICE_PKT_VLAN = BIT(0),
+ ICE_PKT_OUTER_IPV6 = BIT(1),
+ ICE_PKT_TUN_GTPC = BIT(2),
+ ICE_PKT_TUN_GTPU = BIT(3),
+ ICE_PKT_TUN_NVGRE = BIT(4),
+ ICE_PKT_TUN_UDP = BIT(5),
+ ICE_PKT_INNER_IPV6 = BIT(6),
+ ICE_PKT_INNER_TCP = BIT(7),
+ ICE_PKT_INNER_UDP = BIT(8),
+ ICE_PKT_GTP_NOPAY = BIT(9),
+};
+
struct ice_dummy_pkt_offsets {
enum ice_protocol_type type;
u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
-static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+struct ice_dummy_pkt_profile {
+ const struct ice_dummy_pkt_offsets *offsets;
+ const u8 *pkt;
+ u32 match;
+ u16 pkt_len;
+};
+
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
+ ice_dummy_##type##_packet_offsets[]
+
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
+ static const u8 ice_dummy_##type##_packet[]
+
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+}
+
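
For illustration only (not part of the patch): a self-contained model of the declare/profile macro pattern introduced above, using simplified types. The macro bodies mirror the real ones, but none of the names below are actual ice driver code; the point is that token pasting keeps each template's offsets array, byte array and profile entry in sync.

#include <stdio.h>
#include <stdint.h>

struct pkt_offsets { int type; uint16_t offset; };

struct pkt_profile {
	const struct pkt_offsets *offsets;
	const uint8_t *pkt;
	uint32_t match;
	uint16_t pkt_len;
};

#define DECLARE_PKT_OFFSETS(type) \
	static const struct pkt_offsets dummy_##type##_packet_offsets[]

#define DECLARE_PKT_TEMPLATE(type) \
	static const uint8_t dummy_##type##_packet[]

#define PKT_PROFILE(type, m) {				\
	.match   = (m),					\
	.pkt     = dummy_##type##_packet,		\
	.pkt_len = sizeof(dummy_##type##_packet),	\
	.offsets = dummy_##type##_packet_offsets,	\
}

/* One template; real code declares many of these pairs. */
DECLARE_PKT_OFFSETS(tcp) = { { 1, 0 }, { 0, 0 } };
DECLARE_PKT_TEMPLATE(tcp) = { 0x00, 0x01, 0x02, 0x03 };

static const struct pkt_profile profiles[] = {
	PKT_PROFILE(tcp, 0),
};

int main(void)
{
	printf("tcp template: %u bytes, match 0x%x\n",
	       (unsigned int)profiles[0].pkt_len,
	       (unsigned int)profiles[0].match);
	return 0;
}
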
+ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -47,7 +81,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -82,7 +116,7 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -94,7 +128,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -126,7 +160,7 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -141,7 +175,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -179,7 +213,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -194,7 +228,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -229,8 +263,7 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets
-dummy_gre_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -242,7 +275,7 @@ dummy_gre_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -282,8 +315,7 @@ static const u8 dummy_gre_ipv6_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets
-dummy_gre_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -295,7 +327,7 @@ dummy_gre_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -332,8 +364,7 @@ static const u8 dummy_gre_ipv6_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets
-dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -348,7 +379,7 @@ dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -391,8 +422,7 @@ static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets
-dummy_udp_tun_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -407,7 +437,7 @@ dummy_udp_tun_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -448,7 +478,7 @@ static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
};
/* offset info for MAC + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -457,7 +487,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + UDP */
-static const u8 dummy_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -477,7 +507,7 @@ static const u8 dummy_udp_packet[] = {
};
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -487,7 +517,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
};
/* C-tag (801.1Q), IPv4:UDP dummy packet */
-static const u8 dummy_vlan_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -509,7 +539,7 @@ static const u8 dummy_vlan_udp_packet[] = {
};
/* offset info for MAC + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -518,7 +548,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + TCP */
-static const u8 dummy_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -541,7 +571,7 @@ static const u8 dummy_tcp_packet[] = {
};
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -551,7 +581,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
};
/* C-tag (801.1Q), IPv4:TCP dummy packet */
-static const u8 dummy_vlan_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -575,7 +605,7 @@ static const u8 dummy_vlan_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
@@ -583,7 +613,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_tcp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -611,8 +641,7 @@ static const u8 dummy_tcp_ipv6_packet[] = {
};
/* C-tag (802.1Q): IPv6 + TCP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_tcp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -622,7 +651,7 @@ dummy_vlan_tcp_ipv6_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -652,7 +681,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
};
/* IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
@@ -661,7 +690,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
};
/* IPv6 + UDP dummy packet */
-static const u8 dummy_udp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -689,8 +718,7 @@ static const u8 dummy_udp_ipv6_packet[] = {
};
/* C-tag (802.1Q): IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -700,7 +728,7 @@ dummy_vlan_udp_ipv6_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-static const u8 dummy_vlan_udp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -727,8 +755,7 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -738,7 +765,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -776,8 +803,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -787,7 +813,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -822,8 +848,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
};
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -833,7 +858,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -875,8 +900,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -886,7 +910,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -925,8 +949,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -936,7 +959,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -978,8 +1001,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -989,7 +1011,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1028,8 +1050,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1039,7 +1060,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1086,8 +1107,7 @@ static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1097,7 +1117,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1141,7 +1161,15 @@ static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP_NO_PAY, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1171,17 +1199,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
0x00, 0x00,
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_IPV4_OFOS, 14 },
- { ICE_UDP_OF, 34 },
- { ICE_GTP_NO_PAY, 42 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1189,7 +1207,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1215,6 +1233,55 @@ static const u8 dummy_ipv6_gtp_packet[] = {
0x00, 0x00,
};
+static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
+ ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
+ ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
+ ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(tcp, 0),
+};
+
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -5501,212 +5568,66 @@ err_free_lkup_exts:
* structure per protocol header
* @lkups_cnt: number of protocols
* @tun_type: tunnel type
- * @pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: pointer to receive the pointer to the offsets for the packet
+ *
+ * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
*/
-static void
+static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- enum ice_sw_tunnel_type tun_type,
- const u8 **pkt, u16 *pkt_len,
- const struct ice_dummy_pkt_offsets **offsets)
+ enum ice_sw_tunnel_type tun_type)
{
- bool inner_tcp = false, inner_udp = false, outer_ipv6 = false;
- bool vlan = false, inner_ipv6 = false, gtp_no_pay = false;
+ const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
+ u32 match = 0;
u16 i;
+ switch (tun_type) {
+ case ICE_SW_TUN_GTPC:
+ match |= ICE_PKT_TUN_GTPC;
+ break;
+ case ICE_SW_TUN_GTPU:
+ match |= ICE_PKT_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_NVGRE:
+ match |= ICE_PKT_TUN_NVGRE;
+ break;
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ match |= ICE_PKT_TUN_UDP;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < lkups_cnt; i++) {
if (lkups[i].type == ICE_UDP_ILOS)
- inner_udp = true;
+ match |= ICE_PKT_INNER_UDP;
else if (lkups[i].type == ICE_TCP_IL)
- inner_tcp = true;
+ match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
- outer_ipv6 = true;
+ match |= ICE_PKT_OUTER_IPV6;
else if (lkups[i].type == ICE_VLAN_OFOS)
- vlan = true;
+ match |= ICE_PKT_VLAN;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
cpu_to_be16(0xFFFF))
- outer_ipv6 = true;
+ match |= ICE_PKT_OUTER_IPV6;
else if (lkups[i].type == ICE_ETYPE_IL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
cpu_to_be16(0xFFFF))
- inner_ipv6 = true;
+ match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_IPV6_IL)
- inner_ipv6 = true;
+ match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_GTP_NO_PAY)
- gtp_no_pay = true;
+ match |= ICE_PKT_GTP_NOPAY;
}
- if (tun_type == ICE_SW_TUN_GTPU) {
- if (outer_ipv6) {
- if (gtp_no_pay) {
- *pkt = dummy_ipv6_gtp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtp_packet);
- *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
- } else if (inner_ipv6) {
- if (inner_udp) {
- *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
- *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
- *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
- }
- } else {
- if (inner_udp) {
- *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
- *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
- *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
- }
- }
- } else {
- if (gtp_no_pay) {
- *pkt = dummy_ipv4_gtpu_ipv4_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
- *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
- } else if (inner_ipv6) {
- if (inner_udp) {
- *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
- *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
- *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
- }
- } else {
- if (inner_udp) {
- *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
- *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
- *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
- }
- }
- }
- return;
- }
-
- if (tun_type == ICE_SW_TUN_GTPC) {
- if (outer_ipv6) {
- *pkt = dummy_ipv6_gtp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtp_packet);
- *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv4_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
- *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
- }
- return;
- }
-
- if (tun_type == ICE_SW_TUN_NVGRE) {
- if (inner_tcp && inner_ipv6) {
- *pkt = dummy_gre_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet);
- *offsets = dummy_gre_ipv6_tcp_packet_offsets;
- return;
- }
- if (inner_tcp) {
- *pkt = dummy_gre_tcp_packet;
- *pkt_len = sizeof(dummy_gre_tcp_packet);
- *offsets = dummy_gre_tcp_packet_offsets;
- return;
- }
- if (inner_ipv6) {
- *pkt = dummy_gre_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_gre_ipv6_udp_packet);
- *offsets = dummy_gre_ipv6_udp_packet_offsets;
- return;
- }
- *pkt = dummy_gre_udp_packet;
- *pkt_len = sizeof(dummy_gre_udp_packet);
- *offsets = dummy_gre_udp_packet_offsets;
- return;
- }
-
- if (tun_type == ICE_SW_TUN_VXLAN ||
- tun_type == ICE_SW_TUN_GENEVE) {
- if (inner_tcp && inner_ipv6) {
- *pkt = dummy_udp_tun_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet);
- *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets;
- return;
- }
- if (inner_tcp) {
- *pkt = dummy_udp_tun_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
- *offsets = dummy_udp_tun_tcp_packet_offsets;
- return;
- }
- if (inner_ipv6) {
- *pkt = dummy_udp_tun_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet);
- *offsets = dummy_udp_tun_ipv6_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_tun_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_udp_packet);
- *offsets = dummy_udp_tun_udp_packet_offsets;
- return;
- }
+ while (ret->match && (match & ret->match) != ret->match)
+ ret++;
- if (inner_udp && !outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_packet;
- *pkt_len = sizeof(dummy_vlan_udp_packet);
- *offsets = dummy_vlan_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_packet;
- *pkt_len = sizeof(dummy_udp_packet);
- *offsets = dummy_udp_packet_offsets;
- return;
- } else if (inner_udp && outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
- *offsets = dummy_vlan_udp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_udp_ipv6_packet);
- *offsets = dummy_udp_ipv6_packet_offsets;
- return;
- } else if ((inner_tcp && outer_ipv6) || outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
- *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_tcp_ipv6_packet);
- *offsets = dummy_tcp_ipv6_packet_offsets;
- return;
- }
-
- if (vlan) {
- *pkt = dummy_vlan_tcp_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_packet);
- *offsets = dummy_vlan_tcp_packet_offsets;
- } else {
- *pkt = dummy_tcp_packet;
- *pkt_len = sizeof(dummy_tcp_packet);
- *offsets = dummy_tcp_packet_offsets;
- }
+ return ret;
}
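
For illustration only (not part of the patch): a self-contained model of the lookup above. Each profile's match field is the set of bits it requires; the table is ordered most-specific first and ends with an all-zero catch-all ("tcp"), so the scan always stops at the first profile whose bits are a subset of the requested match. The names and bit values below only loosely mirror the real ICE_PKT_* flags.

#include <stdio.h>
#include <stdint.h>

#define PKT_TUN_NVGRE	0x010	/* cf. ICE_PKT_TUN_NVGRE */
#define PKT_INNER_IPV6	0x040	/* cf. ICE_PKT_INNER_IPV6 */
#define PKT_INNER_TCP	0x080	/* cf. ICE_PKT_INNER_TCP */

struct profile { const char *name; uint32_t match; };

/* Most specific first; the zero-match entry is the catch-all terminator. */
static const struct profile profiles[] = {
	{ "gre_ipv6_tcp",	PKT_TUN_NVGRE | PKT_INNER_IPV6 | PKT_INNER_TCP },
	{ "gre_tcp",		PKT_TUN_NVGRE | PKT_INNER_TCP },
	{ "gre_udp",		PKT_TUN_NVGRE },
	{ "tcp",		0 },
};

static const struct profile *find_profile(uint32_t match)
{
	const struct profile *p = profiles;

	while (p->match && (match & p->match) != p->match)
		p++;
	return p;
}

int main(void)
{
	printf("%s\n", find_profile(PKT_TUN_NVGRE | PKT_INNER_TCP)->name);	/* gre_tcp */
	printf("%s\n", find_profile(PKT_TUN_NVGRE)->name);			/* gre_udp */
	printf("%s\n", find_profile(0)->name);					/* tcp */
	return 0;
}
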
/**
@@ -5716,15 +5637,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* structure per protocol header
* @lkups_cnt: number of protocols
* @s_rule: stores rule information from the match criteria
- * @dummy_pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: offset info for the dummy packet
+ * @profile: dummy packet profile (the template, its size and header offsets)
*/
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
- const u8 *dummy_pkt, u16 pkt_len,
- const struct ice_dummy_pkt_offsets *offsets)
+ const struct ice_dummy_pkt_profile *profile)
{
u8 *pkt;
u16 i;
@@ -5734,9 +5652,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*/
pkt = s_rule->pdata.lkup_tx_rx.hdr;
- memcpy(pkt, dummy_pkt, pkt_len);
+ memcpy(pkt, profile->pkt, profile->pkt_len);
for (i = 0; i < lkups_cnt; i++) {
+ const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
enum ice_protocol_type type;
u16 offset = 0, len = 0, j;
bool found = false;
@@ -5810,16 +5729,18 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* indicated by the mask to make sure we don't improperly write
* over any significant packet data.
*/
- for (j = 0; j < len / sizeof(u16); j++)
- if (((u16 *)&lkups[i].m_u)[j])
- ((u16 *)(pkt + offset))[j] =
- (((u16 *)(pkt + offset))[j] &
- ~((u16 *)&lkups[i].m_u)[j]) |
- (((u16 *)&lkups[i].h_u)[j] &
- ((u16 *)&lkups[i].m_u)[j]);
+ for (j = 0; j < len / sizeof(u16); j++) {
+ u16 *ptr = (u16 *)(pkt + offset);
+ u16 mask = lkups[i].m_raw[j];
+
+ if (!mask)
+ continue;
+
+ ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
+ }
}
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len);
return 0;
}
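
For illustration only (not part of the patch): the masked 16-bit merge performed in the loop above, applied to a plain buffer. Only bits set in the mask word are taken from the header value; everything else in the dummy packet is preserved. Helper and variable names are hypothetical.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void masked_merge(uint16_t *pkt, const uint16_t *hdr,
			 const uint16_t *mask, size_t nwords)
{
	for (size_t j = 0; j < nwords; j++) {
		if (!mask[j])
			continue;	/* nothing to match in this word */
		pkt[j] = (pkt[j] & ~mask[j]) | (hdr[j] & mask[j]);
	}
}

int main(void)
{
	uint16_t pkt[2]  = { 0x1234, 0xabcd };	/* dummy packet words */
	uint16_t hdr[2]  = { 0xffff, 0x00ee };	/* lookup header values */
	uint16_t mask[2] = { 0x0000, 0x00ff };	/* which bits to match */

	masked_merge(pkt, hdr, mask, 2);
	printf("%04x %04x\n", pkt[0], pkt[1]);	/* prints: 1234 abee */
	return 0;
}
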
@@ -6042,12 +5963,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_rule_query_data *added_entry)
{
struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
- u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
- const struct ice_dummy_pkt_offsets *pkt_offsets;
struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ const struct ice_dummy_pkt_profile *profile;
+ u16 rid = 0, i, rule_buf_sz, vsi_handle;
struct list_head *rule_head;
struct ice_switch_info *sw;
- const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
int status;
@@ -6065,24 +5985,18 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* get # of words we need to match */
word_cnt = 0;
for (i = 0; i < lkups_cnt; i++) {
- u16 j, *ptr;
+ u16 j;
- ptr = (u16 *)&lkups[i].m_u;
- for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
- if (ptr[j] != 0)
+ for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
+ if (lkups[i].m_raw[j])
word_cnt++;
}
if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
return -EINVAL;
- /* make sure that we can locate a dummy packet */
- ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
- &pkt_offsets);
- if (!pkt) {
- status = -EINVAL;
- goto err_ice_add_adv_rule;
- }
+ /* locate a dummy packet */
+ profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
@@ -6123,7 +6037,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
return status;
}
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
@@ -6183,8 +6097,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
- status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
- pkt_len, pkt_offsets);
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
if (status)
goto err_ice_add_adv_rule;
@@ -6192,7 +6105,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
s_rule->pdata.lkup_tx_rx.hdr,
- pkt_offsets);
+ profile->offsets);
if (status)
goto err_ice_add_adv_rule;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index ed3d1d03befa..ecac75e71395 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -138,8 +138,16 @@ struct ice_update_recipe_lkup_idx_params {
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
- union ice_prot_hdr h_u; /* Header values */
- union ice_prot_hdr m_u; /* Mask of header values to match */
+ union {
+ union ice_prot_hdr h_u; /* Header values */
+ /* Used to iterate over the headers */
+ u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
+ union {
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+ /* Used to iterate over header mask */
+ u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
};
struct ice_sw_act_ctrl {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f9bf008471c9..3f8b7274ed2f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -8,6 +8,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -1748,18 +1749,24 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- protocol = vlan_get_protocol(skb);
-
- if (protocol == htons(ETH_P_IP))
+ /* Set the tx_flags to indicate the IP protocol type; this is
+ * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
first->tx_flags |= ICE_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
+ else if (ip.v6->version == 6)
first->tx_flags |= ICE_TX_FLAGS_IPV6;
if (skb->encapsulation) {
@@ -1957,6 +1964,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
+ __be16 protocol;
u32 paylen;
u8 l4_start;
int err;
@@ -1972,8 +1980,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
return err;
/* cppcheck-suppress unreadVariable */
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cead3eb149bd..f5a906c03669 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -133,6 +133,7 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_CONSUMED BIT(0)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
+#define ICE_XDP_EXIT BIT(3)
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 9dd38f667059..49ba8bfdbf04 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -545,9 +545,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return ICE_XDP_REDIR;
+ if (!err)
+ return ICE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = ICE_XDP_EXIT;
+ else
+ result = ICE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -558,15 +562,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = ICE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- result = ICE_XDP_CONSUMED;
break;
}
@@ -587,6 +592,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false;
+ int entries_to_alloc;
/* ZC path is enabled only when XDP program is set,
* so here it can not be NULL
@@ -634,18 +640,23 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
- if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(xdp);
+ if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == ICE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == ICE_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ } else if (xdp_res == ICE_XDP_PASS) {
+ goto construct_skb;
+ }
- total_rx_bytes += size;
- total_rx_packets++;
+ total_rx_bytes += size;
+ total_rx_packets++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
- ice_bump_ntc(rx_ring);
- continue;
- }
construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
@@ -673,7 +684,9 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -929,13 +942,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
if (queue_id >= vsi->num_txq)
- return -ENXIO;
+ return -EINVAL;
if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
ring = vsi->xdp_rings[queue_id];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index bba3feaf3318..f1f69ce67420 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -8,6 +8,7 @@
#define IXGBE_XDP_CONSUMED BIT(0)
#define IXGBE_XDP_TX BIT(1)
#define IXGBE_XDP_REDIR BIT(2)
+#define IXGBE_XDP_EXIT BIT(3)
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index dd7ff66d422f..1703c640a434 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return IXGBE_XDP_REDIR;
+ if (!err)
+ return IXGBE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = IXGBE_XDP_EXIT;
+ else
+ result = IXGBE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -130,16 +134,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = IXGBE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = IXGBE_XDP_CONSUMED;
- break;
}
return result;
}
@@ -303,21 +307,26 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
- if (xdp_res) {
- if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(bi->xdp);
+ if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IXGBE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IXGBE_XDP_CONSUMED) {
+ xsk_buff_free(bi->xdp);
+ } else if (xdp_res == IXGBE_XDP_PASS) {
+ goto construct_skb;
+ }
- bi->xdp = NULL;
- total_rx_packets++;
- total_rx_bytes += size;
+ bi->xdp = NULL;
+ total_rx_packets++;
+ total_rx_bytes += size;
- cleaned_count++;
- ixgbe_inc_ntc(rx_ring);
- continue;
- }
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+construct_skb:
/* XDP_PASS path */
skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
if (!skb) {
@@ -516,10 +525,10 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!READ_ONCE(adapter->xdp_prog))
- return -ENXIO;
+ return -EINVAL;
if (qid >= adapter->num_xdp_queues)
- return -ENXIO;
+ return -EINVAL;
ring = adapter->xdp_ring[qid];
@@ -527,7 +536,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!ring->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
u64 eics = BIT_ULL(ring->q_vector->v_idx);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fe0989c0fc25..f58a1c0144ba 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -62,6 +62,7 @@ config MVNETA
select MVMDIO
select PHYLINK
select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
@@ -177,6 +178,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 9f88fe822555..ceba4aa4f026 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeon_ep/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 934f6dd90992..f6a54c7f0c69 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4735,6 +4735,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+ page_pool_ethtool_stats_get_strings(data);
}
}
@@ -4847,6 +4850,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
}
}
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < rxq_number; i++)
+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mvneta_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -4857,12 +4871,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
+
+ mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvneta_statistics);
+ return ARRAY_SIZE(mvneta_statistics) +
+ page_pool_ethtool_stats_get_count();
+
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
new file mode 100644
index 000000000000..0d7db815340e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC Driver Configuration
+#
+
+config OCTEON_EP
+ tristate "Marvell Octeon PCI Endpoint NIC Driver"
+ depends on 64BIT
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports networking functionality of Marvell's
+ Octeon PCI Endpoint NIC.
+
+	  For the list of devices supported by this driver, refer to the
+	  documentation in
+	  <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called octeon_ep.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
new file mode 100644
index 000000000000..2026c8118158
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC
+#
+
+obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
+
+octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
new file mode 100644
index 000000000000..6ad88d0fe43f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cn9k_pf.h"
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cn93_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint0",
+ "epf_vfire_rint1",
+ "epf_vfore_rint0",
+ "epf_vfore_rint1",
+ "epf_mbox_rint0",
+ "epf_mbox_rint1",
+ "epf_oei_rint",
+ "epf_dma_rint",
+ "epf_dma_vf_rint0",
+ "epf_dma_vf_rint1",
+ "epf_pp_vf_rint0",
+ "epf_pp_vf_rint1",
+ "epf_misc_rint",
+ "epf_rsvd",
+};
+
+/* Dump useful hardware CSRs for debugging */
+static void cn93_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cn93_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cn93_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_reset_iq(oct, q);
+ cn93_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CN93_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config 93xx PF. */
+static void octep_init_config_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 val;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.pkind = 0;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;
+
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7);
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CN93_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_R_IN_CTL_IS_64B;
+ reg_val |= CN93_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+	oq_ctl &= ~0x7fffffULL;	/* clear the ISIZE and BSIZE (bits 22-0) */
+	oq_ctl |= (oq->buffer_size & 0xffff);	/* populate the BSIZE (bits 15-0) */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ mbox->q_no = q_no;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_pf_mbox_intr(struct octep_device *oct)
+{
+ u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL;
+
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+ for (qno = 0; qno < OCTEP_MAX_VF; qno++) {
+ val = readq(oct->mbox[qno]->mbox_read_reg);
+ dev_dbg(&oct->pdev->dev,
+ "PF MBOX READ: val:%llx from VF:%llx\n", val, qno);
+ }
+
+ writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+}
+
+/* Interrupts handler for all non-queue generic interrupts. */
+static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ goto irq_handled;
+ }
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+
+ goto irq_handled;
+ }
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MBOX INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MBOX_RINT intr: 0x%llx\n", reg_val);
+ cn93_handle_pf_mbox_intr(oct);
+ goto irq_handled;
+ }
+
+ /* Check for OEI INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received OEI_EINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val);
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ goto irq_handled;
+ }
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
+ if (reg_val) {
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+irq_handled:
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset of 93xx */
+static int octep_soft_reset_cn93_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Set core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+ /* clear core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cn93_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
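+	/* Build a mask with one bit per IO ring owned by this PF */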
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
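+	/* Unsigned arithmetic handles wrap-around of the 32-bit hardware counter */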
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
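+	/* Clear any pending doorbell count and wait for the clear to take effect */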
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cn93_pf(oct, q);
+ octep_enable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cn93_pf(oct, q);
+ octep_disable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cn93_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cn93_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cn93_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cn93_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+
+ oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf;
+
+ octep_setup_pci_window_regs_cn93_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cn93_pf(oct);
+ octep_configure_ring_mapping_cn93_pf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
new file mode 100644
index 000000000000..f208f3f9a447
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_CONFIG_H_
+#define _OCTEP_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_32BYTE_INSTR 32
+#define OCTEP_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_IQ_MAX_DESCRIPTORS 1024
+/* Minimum number of input (Tx) requests enqueued before ringing the doorbell */
+#define OCTEP_DB_MIN 1
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_IQ_INTR_THRESHOLD 0x0
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build skb from allocated page buffer once the packet is received.
+ * When a gathered packet is received, make head page as skb head and
+ * page buffers in consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_OQ_PKTS_PER_INTR 128
+#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold
+ * Wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
+
+#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs)
+#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs)
+#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf)
+#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf)
+#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names)
+
+#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr)
+
+/* Hardware Tx Queue configuration. */
+struct octep_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* pkind for packets sent to Octeon */
+ u16 pkind;
+
+	/* Minimum number of commands pending to be posted to Octeon before the
+	 * driver rings the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+	 * only after it has sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+};
+
+/* Tx/Rx configuration */
+struct octep_pf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+
+ /* Starting IOQ number: this changes based on which PEM is used */
+ u16 srn;
+};
+
+/* Octeon Hardware SRIOV config */
+struct octep_sriov_config {
+ /* Max number of VF devices supported */
+ u16 max_vfs;
+
+ /* Number of VF devices enabled */
+ u16 active_vfs;
+
+ /* Max number of rings assigned to VF */
+ u8 max_rings_per_vf;
+
+ /* Number of rings enabled per VF */
+ u8 active_rings_per_vf;
+
+	/* Starting ring number of the VFs: ring-0 of VF-0 of the PF */
+ u16 vf_srn;
+};
+
+/* Octeon MSI-x config. */
+struct octep_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+
+ /* Number of Non IOQ interrupts */
+ u16 non_ioq_msix;
+
+ /* Names of Non IOQ interrupts */
+ char **non_ioq_msix_names;
+};
+
+struct octep_ctrl_mbox_config {
+ /* Barmem address for control mbox */
+ void __iomem *barmem_addr;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_config {
+ /* Input Queue attributes. */
+ struct octep_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_oq_config oq;
+
+ /* NIC Port Configuration */
+ struct octep_pf_ring_config pf_ring_cfg;
+
+ /* SRIOV configuration of the PF */
+ struct octep_sriov_config sriov_cfg;
+
+ /* MSI-X interrupt config */
+ struct octep_msix_config msix_cfg;
+
+ /* ctrl mbox config */
+ struct octep_ctrl_mbox_config ctrl_mbox_cfg;
+};
+#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
new file mode 100644
index 000000000000..39322e4dd100
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_ctrl_mbox.h"
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Timeout in msecs for message response */
+#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100
+/* Polling interval in msecs while waiting for a message response */
+#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10
+
+#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m)
+#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144)
+
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
+#define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \
+ OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
+#define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \
+ (sizeof(struct octep_ctrl_mbox_msg) * (i)))
+
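+/* Advance a circular queue index; elem_cnt is a power of two,
+ * so ANDing with mask (elem_cnt - 1) wraps the index.
+ */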
+static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask)
+{
+ return (index + 1) & mask;
+}
+
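+/* Free entries in the queue; one slot stays unused to distinguish full from empty */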
+static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask)
+{
+ return mask - ((pi - ci) & mask);
+}
+
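+/* Entries currently queued between consumer and producer */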
+static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask)
+{
+ return ((pi - ci) & mask);
+}
+
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
+{
+ u64 magic_num, status;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox->barmem) {
+ pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem);
+ return -EINVAL;
+ }
+
+ magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem));
+ if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) {
+ pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num);
+ return -EINVAL;
+ }
+
+ status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem));
+ if (status != OCTEP_CTRL_MBOX_STATUS_READY) {
+ pr_info("octep_ctrl_mbox : Firmware is not ready.\n");
+ return -EINVAL;
+ }
+
+ mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem));
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->h2fq.elem_sz = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1);
+ mutex_init(&mbox->h2fq_lock);
+
+ mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1);
+ mutex_init(&mbox->f2hq_lock);
+
+ mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_q = mbox->barmem +
+ OCTEP_CTRL_MBOX_INFO_SZ +
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ +
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ;
+
+ mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_q = mbox->h2fq.hw_q +
+ ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) *
+ mbox->h2fq.elem_cnt);
+
+ /* ensure ready state is seen after everything is initialized */
+ wmb();
+ writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Init successful.\n");
+
+ return 0;
+}
+
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ unsigned long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS);
+ unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS);
+ struct octep_ctrl_mbox_q *q;
+ unsigned long expire;
+ u64 *mbuf, *word0;
+ u8 __iomem *qidx;
+ u16 pi, ci;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->h2fq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask))
+ return -ENOMEM;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi);
+ mbuf = (u64 *)msg->msg;
+ word0 = &msg->hdr.word0;
+
+ mutex_lock(&mbox->h2fq_lock);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(*word0, qidx);
+
+ pi = octep_ctrl_mbox_circq_inc(pi, q->mask);
+ writel(pi, q->hw_prod);
+ mutex_unlock(&mbox->h2fq_lock);
+
+ /* don't check for notification response */
+ if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
+ return 0;
+
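+	/* Poll the posted slot until firmware overwrites the header with a
+	 * response, or give up when the timeout expires.
+	 */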
+ expire = jiffies + timeout;
+ while (true) {
+ *word0 = readq(qidx);
+ if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
+ break;
+ schedule_timeout_interruptible(period);
+ if (signal_pending(current) || time_after(jiffies, expire)) {
+ pr_info("octep_ctrl_mbox: Timed out\n");
+ return -EBUSY;
+ }
+ }
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ return 0;
+}
+
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_q *q;
+ u32 count, pi, ci;
+ u8 __iomem *qidx;
+ u64 *mbuf;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->f2hq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+ count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask);
+ if (!count)
+ return -EAGAIN;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci);
+ mbuf = (u64 *)msg->msg;
+
+ mutex_lock(&mbox->f2hq_lock);
+
+ msg->hdr.word0 = readq(qidx);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ ci = octep_ctrl_mbox_circq_inc(ci, q->mask);
+ writel(ci, q->hw_cons);
+
+ mutex_unlock(&mbox->f2hq_lock);
+
+ if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req)
+ return 0;
+
+ mbox->process_req(mbox->user_ctx, msg);
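+	/* Write the handler's response back into the same queue element */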
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(msg->hdr.word0, qidx);
+
+ return 0;
+}
+
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
+{
+ if (!mbox)
+ return -EINVAL;
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+	/* ensure the uninit status is visible before tearing down the mbox */
+ wmb();
+
+ mutex_destroy(&mbox->h2fq_lock);
+ mutex_destroy(&mbox->f2hq_lock);
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Uninit successful.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
new file mode 100644
index 000000000000..2dc5753cfec6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_MBOX_H__
+#define __OCTEP_CTRL_MBOX_H__
+
+/* barmem structure
+ * |===========================================|
+ * |Info (16 + 120 + 120 = 256 bytes) |
+ * |-------------------------------------------|
+ * |magic number (8 bytes) |
+ * |bar memory size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |-------------------------------------------|
+ * |host version (8 bytes) |
+ * |host status (8 bytes) |
+ * |host reserved (104 bytes) |
+ * |-------------------------------------------|
+ * |fw version (8 bytes) |
+ * |fw status (8 bytes) |
+ * |fw reserved (104 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Fw to Host Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ * |===========================================|
+ * |Fw to Host Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ */
+
+#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull
+
+/* Size of mbox info in bytes */
+#define OCTEP_CTRL_MBOX_INFO_SZ 256
+/* Size of mbox host to target queue info in bytes */
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
+/* Size of mbox target to host queue info in bytes */
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
+/* Size of mbox queue in bytes */
+#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt) (((sz) + 8) * (cnt))
+/* Size of mbox in bytes */
+#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt) (OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \
+ OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt))
+
+/* Valid request message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0)
+/* Valid response message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1)
+/* Valid notification, no response required */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2)
+
+enum octep_ctrl_mbox_status {
+ OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
+ OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_STATUS_UNINIT
+};
+
+/* mbox message */
+union octep_ctrl_mbox_msg_hdr {
+ u64 word0;
+ struct {
+ /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
+ u32 flags;
+ /* size of message in words excluding header */
+ u32 sizew;
+ };
+};
+
+/* mbox message */
+struct octep_ctrl_mbox_msg {
+ /* mbox transaction header */
+ union octep_ctrl_mbox_msg_hdr hdr;
+ /* pointer to message buffer */
+ void *msg;
+};
+
+/* Mbox queue */
+struct octep_ctrl_mbox_q {
+ /* q element size, should be aligned to unsigned long */
+ u16 elem_sz;
+ /* q element count, should be power of 2 */
+ u16 elem_cnt;
+ /* q mask */
+ u16 mask;
+ /* producer address in bar mem */
+ u8 __iomem *hw_prod;
+ /* consumer address in bar mem */
+ u8 __iomem *hw_cons;
+ /* q base address in bar mem */
+ u8 __iomem *hw_q;
+};
+
+struct octep_ctrl_mbox {
+ /* host driver version */
+ u64 version;
+ /* size of bar memory */
+ u32 barmem_sz;
+ /* pointer to BAR memory */
+ u8 __iomem *barmem;
+ /* user context for callback, can be null */
+ void *user_ctx;
+ /* callback handler for processing request, called from octep_ctrl_mbox_recv */
+ int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg);
+ /* host-to-fw queue */
+ struct octep_ctrl_mbox_q h2fq;
+ /* fw-to-host queue */
+ struct octep_ctrl_mbox_q f2hq;
+ /* lock for h2fq */
+ struct mutex h2fq_lock;
+ /* lock for f2hq */
+ struct mutex f2hq_lock;
+};
+
+/* Initialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
+
+/* Send mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Retrieve mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg to fill in.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Uninitialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);
+
+#endif /* __OCTEP_CTRL_MBOX_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
new file mode 100644
index 000000000000..7c00c896ab98
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+int octep_get_link_status(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
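+	/* octep_ctrl_mbox_send() reads the firmware response back into the
+	 * request buffer, so the response is interpreted from &req.
+	 */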
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ return resp->link.state;
+}
+
+void octep_set_link_status(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+void octep_set_rx_state(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ memcpy(addr, resp->mac.addr, ETH_ALEN);
+
+ return err;
+}
+
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_SET;
+ memcpy(&req.mac.addr, addr, ETH_ALEN);
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_set_mtu(struct octep_device *oct, int mtu)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.mtu.val = mtu;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_if_stats(struct octep_device *oct)
+{
+ void __iomem *iface_rx_stats;
+ void __iomem *iface_tx_stats;
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+ req.get_stats.offset = oct->ctrl_mbox_ifstats_offset;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset;
+ iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset +
+ sizeof(struct octep_iface_rx_stats);
+ memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats));
+ memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats));
+
+ return err;
+}
+
+int octep_get_link_info(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ oct->link_info.supported_modes = resp->link_info.supported_modes;
+ oct->link_info.advertised_modes = resp->link_info.advertised_modes;
+ oct->link_info.autoneg = resp->link_info.autoneg;
+ oct->link_info.pause = resp->link_info.pause;
+ oct->link_info.speed = resp->link_info.speed;
+
+ return err;
+}
+
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link_info.info.advertised_modes = link_info->advertised_modes;
+ req.link_info.info.autoneg = link_info->autoneg;
+ req.link_info.info.pause = link_info->pause;
+ req.link_info.info.speed = link_info->speed;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
new file mode 100644
index 000000000000..f23b58381322
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_NET_H__
+#define __OCTEP_CTRL_NET_H__
+
+/* Supported commands */
+enum octep_ctrl_net_cmd {
+ OCTEP_CTRL_NET_CMD_GET = 0,
+ OCTEP_CTRL_NET_CMD_SET,
+};
+
+/* Supported states */
+enum octep_ctrl_net_state {
+ OCTEP_CTRL_NET_STATE_DOWN = 0,
+ OCTEP_CTRL_NET_STATE_UP,
+};
+
+/* Supported replies */
+enum octep_ctrl_net_reply {
+ OCTEP_CTRL_NET_REPLY_OK = 0,
+ OCTEP_CTRL_NET_REPLY_GENERIC_FAIL,
+ OCTEP_CTRL_NET_REPLY_INVALID_PARAM,
+};
+
+/* Supported host to fw commands */
+enum octep_ctrl_net_h2f_cmd {
+ OCTEP_CTRL_NET_H2F_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_H2F_CMD_MTU,
+ OCTEP_CTRL_NET_H2F_CMD_MAC,
+ OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+};
+
+/* Supported fw to host commands */
+enum octep_ctrl_net_f2h_cmd {
+ OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+};
+
+struct octep_ctrl_net_req_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+	/* enum octep_ctrl_net_h2f_cmd or octep_ctrl_net_f2h_cmd */
+ u16 cmd;
+ /* reserved */
+ u16 rsvd0;
+};
+
+/* get/set mtu request */
+struct octep_ctrl_net_h2f_req_cmd_mtu {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get/set mac request */
+struct octep_ctrl_net_h2f_req_cmd_mac {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get if_stats, xstats, q_stats request */
+struct octep_ctrl_net_h2f_req_cmd_get_stats {
+ /* offset into barmem where fw should copy over stats */
+ u32 offset;
+};
+
+/* get/set link state, rx state */
+struct octep_ctrl_net_h2f_req_cmd_state {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* link info */
+struct octep_ctrl_net_link_info {
+ /* Bitmap of Supported link speeds/modes */
+ u64 supported_modes;
+ /* Bitmap of Advertised link speeds/modes */
+ u64 advertised_modes;
+	/* Autonegotiation state; bit 0=disabled; bit 1=enabled */
+ u8 autoneg;
+ /* Pause frames setting. bit 0=disabled; bit 1=enabled */
+ u8 pause;
+ /* Negotiated link speed in Mbps */
+ u32 speed;
+};
+
+/* get/set link info */
+struct octep_ctrl_net_h2f_req_cmd_link_info {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_link_info */
+ struct octep_ctrl_net_link_info info;
+};
+
+/* Host to fw request data */
+struct octep_ctrl_net_h2f_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_req_cmd_mac mac;
+ struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats;
+ struct octep_ctrl_net_h2f_req_cmd_state link;
+ struct octep_ctrl_net_h2f_req_cmd_state rx;
+ struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ };
+} __packed;
+
+struct octep_ctrl_net_resp_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+	/* enum octep_ctrl_net_h2f_cmd or octep_ctrl_net_f2h_cmd */
+ u16 cmd;
+ /* octep_ctrl_net_reply */
+ u16 reply;
+};
+
+/* get mtu response */
+struct octep_ctrl_net_h2f_resp_cmd_mtu {
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get mac response */
+struct octep_ctrl_net_h2f_resp_cmd_mac {
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get link state, rx state response */
+struct octep_ctrl_net_h2f_resp_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Host to fw response data */
+struct octep_ctrl_net_h2f_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+ struct octep_ctrl_net_h2f_resp_cmd_state link;
+ struct octep_ctrl_net_h2f_resp_cmd_state rx;
+ struct octep_ctrl_net_link_info link_info;
+ };
+} __packed;
+
+/* link state notification */
+struct octep_ctrl_net_f2h_req_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Fw to host request data */
+struct octep_ctrl_net_f2h_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_f2h_req_cmd_state link;
+ };
+};
+
+/* Fw to host response data */
+struct octep_ctrl_net_f2h_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+};
+
+/* Size of host to fw octep_ctrl_mbox queue element */
+union octep_ctrl_net_h2f_data_sz {
+ struct octep_ctrl_net_h2f_req h2f_req;
+ struct octep_ctrl_net_h2f_resp h2f_resp;
+};
+
+/* Size of fw to host octep_ctrl_mbox queue element */
+union octep_ctrl_net_f2h_data_sz {
+ struct octep_ctrl_net_f2h_req f2h_req;
+ struct octep_ctrl_net_f2h_resp f2h_resp;
+};
+
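+/* Note: sizes "in words" below are in units of sizeof(unsigned long),
+ * i.e. 8 bytes on 64-bit hosts.
+ */
+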
+/* size of host to fw data in words */
+#define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size of fw to host data in words */
+#define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size in words of get/set mtu request */
+#define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2
+/* size in words of get/set mac request */
+#define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2
+/* size in words of get stats request */
+#define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2
+/* size in words of get/set state request */
+#define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2
+/* size in words of get/set link info request */
+#define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4
+
+/* size in words of get mtu response */
+#define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2
+/* size in words of set mtu response */
+#define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1
+/* size in words of get mac response */
+#define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2
+/* size in words of set mac response */
+#define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1
+/* size in words of get state response */
+#define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2
+/* size in words of set state response */
+#define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1
+/* size in words of get link info response */
+#define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4
+/* size in words of set link info response */
+#define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1
+
+/** Get link status from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: link status 0=down, 1=up.
+ */
+int octep_get_link_status(struct octep_device *oct);
+
+/** Set link status in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_link_status(struct octep_device *oct, bool up);
+
+/** Set rx state in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_rx_state(struct octep_device *oct, bool up);
+
+/** Get mac address from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mac address in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ */
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mtu in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param mtu: mtu.
+ */
+int octep_set_mtu(struct octep_device *oct, int mtu);
+
+/** Get interface statistics from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_if_stats(struct octep_device *oct);
+
+/** Get link info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_link_info(struct octep_device *oct);
+
+/** Set link info in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ */
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info);
+
+#endif /* __OCTEP_CTRL_NET_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
new file mode 100644
index 000000000000..87ef129b269a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "tx_hw_underflow",
+ "tx_hw_control",
+ "tx_less_than_64",
+ "tx_equal_64",
+ "tx_equal_65_to_127",
+ "tx_equal_128_to_255",
+ "tx_equal_256_to_511",
+ "tx_equal_512_to_1023",
+ "tx_equal_1024_to_1518",
+ "tx_greater_than_1518",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_hw_mcast",
+ "rx_pause_pkts",
+ "rx_pause_bytes",
+ "rx_dropped_pkts_fifo_full",
+ "rx_dropped_bytes_fifo_full",
+ "rx_err_pkts",
+};
+
+#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
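+
+/* The per-queue string templates above carry a "[Q-%u]" placeholder that
+ * octep_get_strings() expands with the queue index.
+ */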
+
+static void octep_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+octep_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_tx_stats *iface_tx_stats;
+ struct octep_iface_rx_stats *iface_rx_stats;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_packets = 0;
+ rx_bytes = 0;
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ tx_busy_errors += iq->stats.tx_busy;
+
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
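+
+ /* The order of data[] entries below must match the order of
+ * octep_gstrings_global_stats[].
+ */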
+ i = 0;
+ data[i++] = rx_packets;
+ data[i++] = tx_packets;
+ data[i++] = rx_bytes;
+ data[i++] = tx_bytes;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full +
+ iface_rx_stats->err_pkts;
+ data[i++] = iface_tx_stats->xscol +
+ iface_tx_stats->xsdef;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_tx_stats->undflw;
+ data[i++] = iface_tx_stats->ctl;
+ data[i++] = iface_tx_stats->hist_lt64;
+ data[i++] = iface_tx_stats->hist_eq64;
+ data[i++] = iface_tx_stats->hist_65to127;
+ data[i++] = iface_tx_stats->hist_128to255;
+ data[i++] = iface_tx_stats->hist_256to511;
+ data[i++] = iface_tx_stats->hist_512to1023;
+ data[i++] = iface_tx_stats->hist_1024to1518;
+ data[i++] = iface_tx_stats->hist_gt1518;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->mcast_pkts;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->pause_pkts;
+ data[i++] = iface_rx_stats->pause_octets;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ data[i++] = iface_rx_stats->err_pkts;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
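+/* Translate the driver's OCTEP_LINK_MODE_* bitmap into the corresponding
+ * ethtool link-mode bits of the given ksettings mask (supported/advertising).
+ */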
+#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \
+{ \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ if (link_info->pause) {
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static int octep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info link_info_new;
+ struct octep_iface_link_info *link_info;
+ u64 advertised = 0;
+ u8 autoneg = 0;
+ int err;
+
+ link_info = &oct->link_info;
+ memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info));
+
+ /* Only Full duplex is supported;
+ * Assume full duplex when duplex is unknown.
+ */
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_UNKNOWN)
+ return -EOPNOTSUPP;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED))
+ return -EOPNOTSUPP;
+ autoneg = 1;
+ }
+
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
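+ /* Translate the requested ethtool advertising mask back into the
+ * driver's OCTEP_LINK_MODE_* bitmap.
+ */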
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseR_FEC))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseLR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseLR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseLR_ER_FR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseLR4_ER4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4);
+
+ if (advertised == link_info->advertised_modes &&
+ cmd->base.speed == link_info->speed &&
+ cmd->base.autoneg == link_info->autoneg)
+ return 0;
+
+ link_info_new.advertised_modes = advertised;
+ link_info_new.speed = cmd->base.speed;
+ link_info_new.autoneg = autoneg;
+
+ err = octep_set_link_info(oct, &link_info_new);
+ if (err)
+ return err;
+
+ memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info));
+ return 0;
+}
+
+static const struct ethtool_ops octep_ethtool_ops = {
+ .get_drvinfo = octep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_get_strings,
+ .get_sset_count = octep_get_sset_count,
+ .get_ethtool_stats = octep_get_ethtool_stats,
+ .get_link_ksettings = octep_get_link_ksettings,
+ .set_link_ksettings = octep_set_link_ksettings,
+};
+
+void octep_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
new file mode 100644
index 000000000000..e020c81f3455
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+struct workqueue_struct *octep_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to interrupt handler, from which napi poll
+ * is scheduled and includes quick access to private data of Tx/Rx queue
+ * corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_alloc_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+ struct octep_ioq_vector *ioq_vector;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_free_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_enable_msix_range(struct octep_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* Generic interrupts apart from input/output queues */
+ num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
+ oct->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_disable_msix(struct octep_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all non-queue (generic) interrupts.
+ */
+static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.non_ioq_intr_handler(oct);
+}
+
+/**
+ * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data containing pointers to the Tx/Rx queue private data
+ * and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue (IOQ) interrupts.
+ */
+static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_ioq_vector *ioq_vector = data;
+ struct octep_device *oct = ioq_vector->octep_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_request_irqs(struct octep_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ char **non_ioq_msix_names;
+ int num_non_ioq_msix;
+ int ret, i;
+
+ num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
+ non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
+
+ oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
+ OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
+ if (!oct->non_ioq_irq_names)
+ goto alloc_err;
+
+ /* First few MSI-X interrupts are non-queue interrupts */
+ for (i = 0; i < num_non_ioq_msix; i++) {
+ char *irq_name;
+
+ irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
+ "%s-%s", netdev->name, non_ioq_msix_names[i]);
+ ret = request_irq(msix_entry->vector,
+ octep_non_ioq_intr_handler, 0,
+ irq_name, oct);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for %s; err=%d",
+ irq_name, ret);
+ goto non_ioq_irq_err;
+ }
+ }
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i + num_non_ioq_msix];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
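+ /* Spread queue interrupt affinity across online CPUs round-robin. */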
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (i > num_non_ioq_msix) {
+ --i;
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+non_ioq_irq_err:
+ while (i) {
+ --i;
+ free_irq(oct->msix_entries[i].vector, oct);
+ }
+alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_free_irqs(struct octep_device *oct)
+{
+ int i;
+
+ /* First few MSI-X interrupts are non-queue interrupts; free them */
+ for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
+ free_irq(oct->msix_entries[i].vector, oct);
+ kfree(oct->non_ioq_irq_names);
+
+ /* Free IRQs for Input/Output (Tx/Rx) queues */
+ for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector,
+ oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_setup_irqs(struct octep_device *oct)
+{
+ if (octep_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_disable_msix(oct);
+enable_msix_err:
+ octep_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_clean_irqs(struct octep_device *oct)
+{
+ octep_free_irqs(oct);
+ octep_disable_msix(oct);
+ octep_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to the RESEND bit */
+ wmb();
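+ /* Writing the RESEND bit (as the name suggests) re-arms the queue
+ * interrupt, so completions that raced with this handler still raise
+ * a fresh MSI-X.
+ */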
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
+ rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
+
+ /* More polling is needed if Tx completion processing is still pending
+ * or if at least 'budget' Rx packets were processed.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ napi_complete(napi);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+}
+
+/**
+ * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_add(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
+ octep_napi_poll, 64);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_delete(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_enable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_disable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * octep_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_open(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_napi_add(oct);
+ octep_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_set_rx_state(oct, true);
+
+ ret = octep_get_link_status(oct);
+ if (!ret)
+ octep_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_oq_dbell_init(oct);
+
+ ret = octep_get_link_status(oct);
+ if (ret)
+ octep_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+ octep_clean_irqs(oct);
+setup_irq_err:
+ octep_free_oqs(oct);
+setup_oq_err:
+ octep_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Stop the device Tx/Rx operations, bring down the link, and
+ * free all resources allocated for the Tx/Rx queues and interrupts.
+ */
+static int octep_stop(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_set_link_status(oct, false);
+ octep_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+
+ octep_clean_irqs(oct);
+ octep_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_free_oqs(oct);
+ octep_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static inline int octep_iq_full_check(struct octep_iq *iq)
+{
+ if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ return 0;
+
+ /* Stop the queue if unable to send */
+ netif_stop_subqueue(iq->netdev, iq->q_no);
+
+ /* check again and restart the queue, in case NAPI has just freed
+ * enough Tx ring entries.
+ */
+ if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ netif_start_subqueue(iq->netdev, iq->q_no);
+ iq->stats.restart_cnt++;
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_tx_sglist_desc *sglist;
+ struct octep_tx_buffer *tx_buffer;
+ struct octep_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_instr_hdr *ih;
+ struct octep_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ u16 q_no, wi;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+ if (octep_iq_full_check(iq)) {
+ iq->stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->tlen = skb->len;
+ ih->pkind = oct->pkind;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+ memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
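+ /* Each sglist descriptor packs four lengths and four DMA pointers;
+ * the (si >> 2) / (si & 3) indexing below walks them, with lengths
+ * filled from len[3] down to len[0] within a descriptor.
+ */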
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
+
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
+ atomic_inc(&iq->instr_pending);
+ wi++;
+ if (wi == iq->max_count)
+ wi = 0;
+ iq->host_write_index = wi;
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ iq->stats.instr_posted++;
+ skb_tx_timestamp(skb);
+ return NETDEV_TX_OK;
+
+dma_map_sg_err:
+ /* Entry 0 (the linear part) and entries 1 .. si - 1 (fragments) are
+ * mapped at this point; lengths were stored at len[3 - (index & 3)],
+ * so unmap using the same slots.
+ */
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ while (si > 1) {
+ si--;
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ }
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * octep_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ struct octep_device *oct = netdev_priv(netdev);
+ int q;
+
+ octep_get_if_stats(oct);
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->collisions = oct->iface_tx_stats.xscol;
+ stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+}
+
+/**
+ * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, which potentially clears a Tx queue timeout
+ * condition.
+ **/
+static void octep_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_stop(netdev);
+ octep_open(netdev);
+ }
+ rtnl_unlock();
+}
+
+/**
+ * octep_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ queue_work(octep_wq, &oct->tx_timeout_task);
+}
+
+static int octep_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ int err = 0;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+
+ return err;
+}
+
+static const struct net_device_ops octep_netdev_ops = {
+ .ndo_open = octep_open,
+ .ndo_stop = octep_stop,
+ .ndo_start_xmit = octep_start_xmit,
+ .ndo_get_stats64 = octep_get_stats64,
+ .ndo_tx_timeout = octep_tx_timeout,
+ .ndo_set_mac_address = octep_set_mac,
+ .ndo_change_mtu = octep_change_mtu,
+};
+
+/**
+ * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
+ *
+ * @work: pointer to ctrl mbox work_struct
+ *
+ * Poll ctrl mbox message queue and handle control messages from firmware.
+ **/
+static void octep_ctrl_mbox_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ ctrl_mbox_task);
+ struct net_device *netdev = oct->netdev;
+ struct octep_ctrl_net_f2h_req req = {};
+ struct octep_ctrl_mbox_msg msg;
+ int ret = 0;
+
+ msg.msg = &req;
+ while (true) {
+ ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg);
+ if (ret)
+ break;
+
+ switch (req.hdr.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ if (netif_running(netdev)) {
+ if (req.link.state) {
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ netif_carrier_on(netdev);
+ } else {
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ netif_carrier_off(netdev);
+ }
+ }
+ break;
+ default:
+ pr_info("Unknown mbox req : %u\n", req.hdr.cmd);
+ break;
+ }
+ }
+}
+
+/**
+ * octep_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_device_setup(struct octep_device *oct)
+{
+ struct octep_ctrl_mbox *ctrl_mbox;
+ struct pci_dev *pdev = oct->pdev;
+ int i, ret;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR0, BAR2 and BAR4 regions; 64-bit BARs occupy two resource
+ * slots each, hence the 'i * 2' indexing.
+ */
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ oct->mmio[i].hw_addr =
+ ioremap(pci_resource_start(oct->pdev, i * 2),
+ pci_resource_len(oct->pdev, i * 2));
+ oct->mmio[i].mapped = 1;
+ }
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ dev_info(&pdev->dev,
+ "Setting up OCTEON CN93XX PF PASS%d.%d\n",
+ OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ octep_device_setup_cn93_pf(oct);
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "%s: unsupported device\n", __func__);
+ goto unsupported_dev;
+ }
+
+ oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
+
+ /* Initialize control mbox */
+ ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+ ret = octep_ctrl_mbox_init(ctrl_mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+ return -1;
+ }
+ oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
+ ctrl_mbox->h2fq.elem_cnt,
+ ctrl_mbox->f2hq.elem_sz,
+ ctrl_mbox->f2hq.elem_cnt);
+
+ return 0;
+
+unsupported_dev:
+ return -1;
+}
+
+/**
+ * octep_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_device_cleanup(struct octep_device *oct)
+{
+ int i;
+
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ for (i = 0; i < OCTEP_MAX_VF; i++) {
+ if (oct->mbox[i])
+ vfree(oct->mbox[i]);
+ oct->mbox[i] = NULL;
+ }
+
+ octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+
+ oct->hw_ops.soft_reset(oct);
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ if (oct->mmio[i].mapped)
+ iounmap(oct->mmio[i].hw_addr);
+ }
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+/**
+ * octep_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_device *octep_dev = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto err_dma_mask;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto err_pci_regions;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_device),
+ OCTEP_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_dev = netdev_priv(netdev);
+ octep_dev->netdev = netdev;
+ octep_dev->pdev = pdev;
+ octep_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_dev);
+
+ err = octep_device_setup(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto err_octep_config;
+ }
+ INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
+
+ netdev->netdev_ops = &octep_netdev_ops;
+ octep_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ netdev->hw_features = NETIF_F_SG;
+ netdev->features |= netdev->hw_features;
+ netdev->min_mtu = OCTEP_MIN_MTU;
+ netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->mtu = OCTEP_DEFAULT_MTU;
+
+ octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_dev->mac_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto register_dev_err;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+register_dev_err:
+ octep_device_cleanup(octep_dev);
+err_octep_config:
+ free_netdev(netdev);
+err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
+err_pci_regions:
+err_dma_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * octep_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_remove(struct pci_dev *pdev)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ cancel_work_sync(&oct->tx_timeout_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_driver = {
+ .name = OCTEP_DRV_NAME,
+ .id_table = octep_pci_id_tbl,
+ .probe = octep_probe,
+ .remove = octep_remove,
+};
+
+/**
+ * octep_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);
+
+ /* work queue for all deferred tasks */
+ octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
+ if (!octep_wq) {
+ pr_err("%s: Failed to create common workqueue\n",
+ OCTEP_DRV_NAME);
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&octep_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_DRV_NAME, ret);
+ return ret;
+ }
+
+ pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);
+
+ return ret;
+}
+
+/**
+ * octep_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);
+
+ pci_unregister_driver(&octep_driver);
+ destroy_workqueue(octep_wq);
+
+ pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
+}
+
+module_init(octep_init_module);
+module_exit(octep_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
new file mode 100644
index 000000000000..025626a61383
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_MAIN_H_
+#define _OCTEP_MAIN_H_
+
+#include "octep_tx.h"
+#include "octep_rx.h"
+#include "octep_ctrl_mbox.h"
+
+#define OCTEP_DRV_NAME "octeon_ep"
+#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"
+
+#define OCTEP_PCIID_CN93_PF 0xB200177d
+#define OCTEP_PCIID_CN93_VF 0xB203177d
+
+#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+
+#define OCTEP_MAX_QUEUES 63
+#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_VF 64
+
+#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ
+
+/* Flags to disable and enable Interrupts */
+#define OCTEP_INPUT_INTR (1)
+#define OCTEP_OUTPUT_INTR (2)
+#define OCTEP_MBOX_INTR (4)
+#define OCTEP_ALL_INTR 0xff
+
+#define OCTEP_IQ_INTR_RESEND_BIT 59
+#define OCTEP_OQ_INTR_RESEND_BIT 59
+
+#define OCTEP_MMIO_REGIONS 3
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon is mapped to a separate virtual address range in
+ * the kernel.
+ */
+struct octep_mmio {
+ /* The kernel virtual address (from ioremap()) to which the
+ * PCI address space is mapped.
+ */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_pci_win_regs {
+ u8 __iomem *pci_win_wr_addr;
+ u8 __iomem *pci_win_rd_addr;
+ u8 __iomem *pci_win_wr_data;
+ u8 __iomem *pci_win_rd_data;
+};
+
+struct octep_hw_ops {
+ void (*setup_iq_regs)(struct octep_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ int (*soft_reset)(struct octep_device *oct);
+ void (*reinit_regs)(struct octep_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_iq *iq);
+
+ void (*enable_interrupts)(struct octep_device *oct);
+ void (*disable_interrupts)(struct octep_device *oct);
+
+ void (*enable_io_queues)(struct octep_device *oct);
+ void (*disable_io_queues)(struct octep_device *oct);
+ void (*enable_iq)(struct octep_device *oct, int q);
+ void (*disable_iq)(struct octep_device *oct, int q);
+ void (*enable_oq)(struct octep_device *oct, int q);
+ void (*disable_oq)(struct octep_device *oct, int q);
+ void (*reset_io_queues)(struct octep_device *oct);
+ void (*dump_registers)(struct octep_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_mbox_data {
+ u32 cmd;
+ u32 total_len;
+ u32 recv_len;
+ u32 rsvd;
+ u64 *data;
+};
+
+/* Octeon device mailbox */
+struct octep_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ u32 q_no;
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ struct octep_mbox_data mbox_data;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_ioq_vector {
+ char name[OCTEP_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_device *octep_dev;
+ struct octep_iq *iq;
+ struct octep_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_link_mode_bit_indices {
+ OCTEP_LINK_MODE_10GBASE_T = 0,
+ OCTEP_LINK_MODE_10GBASE_R,
+ OCTEP_LINK_MODE_10GBASE_CR,
+ OCTEP_LINK_MODE_10GBASE_KR,
+ OCTEP_LINK_MODE_10GBASE_LR,
+ OCTEP_LINK_MODE_10GBASE_SR,
+ OCTEP_LINK_MODE_25GBASE_CR,
+ OCTEP_LINK_MODE_25GBASE_KR,
+ OCTEP_LINK_MODE_25GBASE_SR,
+ OCTEP_LINK_MODE_40GBASE_CR4,
+ OCTEP_LINK_MODE_40GBASE_KR4,
+ OCTEP_LINK_MODE_40GBASE_LR4,
+ OCTEP_LINK_MODE_40GBASE_SR4,
+ OCTEP_LINK_MODE_50GBASE_CR2,
+ OCTEP_LINK_MODE_50GBASE_KR2,
+ OCTEP_LINK_MODE_50GBASE_SR2,
+ OCTEP_LINK_MODE_50GBASE_CR,
+ OCTEP_LINK_MODE_50GBASE_KR,
+ OCTEP_LINK_MODE_50GBASE_LR,
+ OCTEP_LINK_MODE_50GBASE_SR,
+ OCTEP_LINK_MODE_100GBASE_CR4,
+ OCTEP_LINK_MODE_100GBASE_KR4,
+ OCTEP_LINK_MODE_100GBASE_LR4,
+ OCTEP_LINK_MODE_100GBASE_SR4,
+ OCTEP_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: physical link is up/down */
+ u8 oper_up;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_device {
+ struct octep_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_mmio mmio[OCTEP_MMIO_REGIONS];
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* Pointers to Octeon Tx queues */
+ struct octep_iq *iq[OCTEP_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_oq *oq[OCTEP_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* PCI Window registers to access some hardware CSRs */
+ struct octep_pci_win_regs pci_win_regs;
+ /* Hardware operations */
+ struct octep_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* IOq information for its corresponding MSI-X interrupt. */
+ struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_mbox *mbox[OCTEP_MAX_VF];
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* Control mbox over PF */
+ struct octep_ctrl_mbox ctrl_mbox;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Work entry to handle ctrl mbox interrupt */
+ struct work_struct ctrl_mbox_task;
+};
+
+static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_MINOR_REV(struct octep_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
+
+/* Octeon CSR read/write access APIs */
+#define octep_write_csr(octep_dev, reg_off, value) \
+ writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_write_csr64(octep_dev, reg_off, val64) \
+ writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr(octep_dev, reg_off) \
+ readl((octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr64(octep_dev, reg_off) \
+ readq((octep_dev)->mmio[0].hw_addr + (reg_off))
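A minimal usage sketch of the CSR accessors above (illustrative only; the pairing with CN93_SDP_MAC_NUMBER and CN93_SDP_EPF_SCRATCH from the CN93 register header later in this patch is my own assumption that both are BAR0-mapped CSRs).

	u64 mac_num;

	mac_num = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER);
	octep_write_csr64(oct, CN93_SDP_EPF_SCRATCH, mac_num);	/* scratch write, for illustration only */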
+
+/* Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+static inline u64
+OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr)
+{
+ u64 val64;
+
+ addr |= 1ull << 53; /* read 8 bytes */
+ writeq(addr, oct->pci_win_regs.pci_win_rd_addr);
+ val64 = readq(oct->pci_win_regs.pci_win_rd_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64);
+
+ return val64;
+}
+
+/* Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to write
+ * @param val - Value to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+static inline void
+OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val)
+{
+ writeq(addr, oct->pci_win_regs.pci_win_wr_addr);
+ writeq(val, oct->pci_win_regs.pci_win_wr_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val);
+}
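For illustration only, not part of the patch: an indirectly accessed CSR would be read through the helper above, e.g. with an address taken from the CN93 register header added later in this series (the choice of CN93_RST_BOOT is mine).

	u64 boot;

	boot = OCTEP_PCI_WIN_READ(oct, CN93_RST_BOOT);
	dev_info(&oct->pdev->dev, "RST_BOOT: 0x%016llx\n", boot);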
+
+extern struct workqueue_struct *octep_wq;
+
+int octep_device_setup(struct octep_device *oct);
+int octep_setup_iqs(struct octep_device *oct);
+void octep_free_iqs(struct octep_device *oct);
+void octep_clean_iqs(struct octep_device *oct);
+int octep_setup_oqs(struct octep_device *oct);
+void octep_free_oqs(struct octep_device *oct);
+void octep_oq_dbell_init(struct octep_device *oct);
+void octep_device_setup_cn93_pf(struct octep_device *oct);
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
+int octep_oq_process_rx(struct octep_oq *oq, int budget);
+void octep_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _OCTEP_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
new file mode 100644
index 000000000000..cc51149790ff
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT 0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR 0x38
+#define CN93_CONFIG_PCIE_CAP 0x70
+#define CN93_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN93_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET (0x1ULL << 17)
+#define CN93_EPF_OFFSET (0x1ULL << 25)
+#define CN93_MAC_OFFSET (0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH 0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64 0x20000
+#define CN93_SDP_WIN_RD_ADDR64 0x20010
+#define CN93_SDP_WIN_WR_DATA64 0x20020
+#define CN93_SDP_WIN_WR_MASK_REG 0x20030
+#define CN93_SDP_WIN_RD_DATA64 0x20040
+
+#define CN93_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO 0x205F0
+
+#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
+
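A hedged sketch of how the RINFO field extractors above might be used; this is my own example and assumes the register is read through octep_read_csr64() from octep_main.h.

	u64 rinfo = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
	u32 start_ring   = CN93_SDP_EPF_RINFO_SRN(rinfo);	/* starting ring number */
	u32 rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(rinfo);	/* rings per VF */
	u32 num_vfs      = CN93_SDP_EPF_RINFO_NVFS(rinfo);	/* number of VFs */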
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_SDP_R_IN_CNTS_START 0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring) \
+ (CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring) \
+ (CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring) \
+ (CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
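A quick sanity check on the per-ring addressing (my own arithmetic, not from the patch): ring register blocks are CN93_RING_OFFSET = 1 << 17 = 0x20000 bytes apart, so for example:

	/* CN93_SDP_R_IN_INSTR_DBELL(2) = 0x10040 + 2 * 0x20000 = 0x50040 */
	u64 dbell_off = CN93_SDP_R_IN_INSTR_DBELL(2);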
+
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CN93_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CN93_R_IN_CTL_NSR (0x1ULL << 3)
+#define CN93_R_IN_CTL_ESR (0x1ULL << 1)
+#define CN93_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CN93_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_SDP_R_OUT_CONTROL(ring) \
+ (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_ENABLE(ring) \
+ (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS(ring) \
+ (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When an MBOX write from the PF happens for a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is a read-only register; the interrupt can be cleared by writing 1
+ * to PF_VF_INT.
+ */
+/* The first two registers below are for PF-to-VF signalling; the last one
+ * carries data from VF to PF.
+ */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START 0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring) \
+ (CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START 0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring) \
+ (CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring) \
+ (CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring) \
+ (CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START 0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT 0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START 0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT 0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CN93_SDP_EPF_OEI_RINT 0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390
+
+#define CN93_SDP_EPF_DMA_RINT 0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440
+#define CN93_SDP_EPF_DMA_CNT_START 0x20460
+#define CN93_SDP_EPF_DMA_TIM_START 0x20480
+
+#define CN93_SDP_EPF_MISC_RINT 0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0
+
+#define CN93_SDP_EPF_MBOX_RINT(index) \
+ (CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index) \
+ (CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT BIT_ULL(62)
+#define CN93_INTR_R_IN_INT BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT BIT_ULL(60)
+#define CN93_INTR_R_RESEND BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START 0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START 0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CN93_SDP_EPVF_RING(ring) \
+ (CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring) \
+ (CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring) \
+ (CN93_SDP_IN_RATE_LIMIT_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_MAC_PF_RING_CTL(mac) \
+ (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET))
+
+#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF)
+#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
+#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+
+/* Number of non-queue interrupts in CN93xx */
+#define CN93_NUM_NON_IOQ_INTR 16
+#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
new file mode 100644
index 000000000000..945947ec7723
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+static void octep_oq_reset_indices(struct octep_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -1, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -1;
+}
+
+/**
+ * octep_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_setup_oq(struct octep_device *oct, int q_no)
+{
+ struct octep_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an extended header is filled in by Octeon after the length field in
+ * Rx packets; it carries additional packet information.
+ */
+ if (oct->caps_enabled)
+ oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -1;
+}
+
+/**
+ * octep_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_oq_free_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_oq_reset_indices(oq);
+}
+
+/**
+ * octep_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_free_oq(struct octep_oq *oq)
+{
+ struct octep_device *oct = oq->octep_dev;
+ int q_no = oq->q_no;
+
+ octep_oq_free_ring_buffers(oq);
+
+ if (oq->buff_info)
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_oqs(struct octep_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_free_oq(oct->oq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_oq_dbell_init(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_oqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter is close to wrapping around.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than single Rx buffer arrive in consecutive descriptors.
+ * But the count returned by the hardware accounts only for full packets, not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_oq_process_rx(struct octep_device *oct,
+ struct octep_oq *oq, u16 pkts_to_process)
+{
+ struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ struct octep_rx_buffer *buff_info;
+ struct octep_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ struct sk_buff *skb;
+ u16 data_offset;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = &oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Swap the length field that is in Big-Endian to CPU */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE +
+ OCTEP_OQ_RESP_HW_EXT_SIZE;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (resp_hw_ext &&
+ resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets that can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_oq_process_rx(struct octep_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
new file mode 100644
index 000000000000..782a24f27f3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_RX_H_
+#define _OCTEP_RX_H_
+
+/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the skb->data
+ * @info_ptr: DMA address of host memory, used by hardware to update the packet count.
+ * This is currently unused, to save PCI writes.
+ */
+struct octep_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
+
+#define OCTEP_CSUM_L4_VERIFIED 0x1
+#define OCTEP_CSUM_IP_VERIFIED 0x2
+#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * Valid only if the hardware/firmware has advertised support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 reserved:62;
+
+ /* checksum verified. */
+ u64 csum_verified:2;
+};
+
+#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian byte order, so it must be converted to CPU endianness.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_rx_buffer {
+ struct page *page;
+
+ /* Length from the Rx hardware descriptor, after conversion to CPU endianness */
+ u64 len;
+};
+
+#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer))
+
+/* Output Queue statistics. Each output queue maintains its own set of these counters. */
+struct octep_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon OQ.
+ */
+struct octep_oq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed to by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_OQ_SIZE (sizeof(struct octep_oq))
+#endif /* _OCTEP_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
new file mode 100644
index 000000000000..511552bc3e87
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Reset various index of Tx queue data structure. */
+static void octep_iq_reset_indices(struct octep_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+ atomic_set(&iq->instr_pending, 0);
+}
+
+/**
+ * octep_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_device *oct = iq->octep_dev;
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ atomic_sub(compl_pkts, &iq->instr_pending);
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+
+ if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
+ ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ netif_wake_subqueue(iq->netdev, iq->q_no);
+ return !budget;
+}
+
+/**
+ * octep_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_iq_free_pending(struct octep_iq *iq)
+{
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ atomic_set(&iq->instr_pending, 0);
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset queue indices
+ */
+void octep_clean_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_iq_free_pending(oct->iq[i]);
+ octep_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_setup_iq(struct octep_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_free_iq(struct octep_iq *iq)
+{
+ struct octep_device *oct = iq->octep_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ if (iq->buff_info)
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_iqs(struct octep_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
new file mode 100644
index 000000000000..2ef57980eb47
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_TX_H_
+#define _OCTEP_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list */
+struct octep_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb, which also goes to Octeon hardware as a gather buffer.
+ * To allocate sufficient SGLIST entries for a packet with max fragments,
+ * round up by adding 3 before calculating the max SGLIST entries per packet.
+ */
+#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_SGLIST_SIZE_PER_PKT \
+ (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc))
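A worked instance of the sizing above (my own arithmetic, assuming the common MAX_SKB_FRAGS value of 17 and an 8-byte dma_addr_t):

	/* OCTEP_SGLIST_ENTRIES_PER_PKT = (17 + 1 + 3) / 4 = 5 entries
	 * sizeof(struct octep_tx_sglist_desc) = 4 * 2 + 4 * 8 = 40 bytes
	 * OCTEP_SGLIST_SIZE_PER_PKT = 5 * 40 = 200 bytes per packet
	 */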
+
+struct octep_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer))
+
+/* Hardware interface Tx statistics */
+struct octep_iface_tx_stats {
+ /* Packets dropped due to excessive collisions */
+ u64 xscol;
+
+ /* Packets dropped due to excessive deferral */
+ u64 xsdef;
+
+ /* Packets sent that experienced multiple collisions before successful
+ * transmission
+ */
+ u64 mcol;
+
+ /* Packets sent that experienced a single collision before successful
+ * transmission
+ */
+ u64 scol;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Packets sent with an octet count < 64 */
+ u64 hist_lt64;
+
+ /* Packets sent with an octet count == 64 */
+ u64 hist_eq64;
+
+ /* Packets sent with an octet count of 65–127 */
+ u64 hist_65to127;
+
+ /* Packets sent with an octet count of 128–255 */
+ u64 hist_128to255;
+
+ /* Packets sent with an octet count of 256–511 */
+ u64 hist_256to511;
+
+ /* Packets sent with an octet count of 512–1023 */
+ u64 hist_512to1023;
+
+ /* Packets sent with an octet count of 1024-1518 */
+ u64 hist_1024to1518;
+
+ /* Packets sent with an octet count of > 1518 */
+ u64 hist_gt1518;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets sent that experienced a transmit underflow and were
+ * truncated
+ */
+ u64 undflw;
+
+ /* Control/PAUSE packets sent */
+ u64 ctl;
+};
+
+/* Input Queue statistics. Each input queue maintains its own set of these counters. */
+struct octep_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) of
+ * an Octeon device has one such structure to represent it.
+ */
+struct octep_iq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_iq_stats stats;
+
+ /* This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator: 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+/* Hardware Tx completion response header */
+struct octep_instr_resp_hdr {
+ /* Request ID */
+ u64 rid:16;
+
+ /* PCIe port to use for response */
+ u64 pcie_port:3;
+
+ /* Scatter indicator 1=scatter */
+ u64 scatter:1;
+
+ /* Size of Expected result OR no. of entries in scatter list */
+ u64 rlenssz:14;
+
+ /* Desired destination port for result */
+ u64 dport:6;
+
+ /* Opcode Specific parameters */
+ u64 param:8;
+
+ /* Opcode for the return packet */
+ u64 opcode:16;
+};
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are optional
+ * and filled in by the driver based on firmware/hardware capabilities.
+ * These optional headers are collectively called Front Data, and their size
+ * is described by ih->fsz.
+ */
+struct octep_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_instr_hdr ih;
+ u64 ih64;
+ };
+
+ /* Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /* Input Instruction Response Header. */
+ struct octep_instr_resp_hdr irh;
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
+#endif /* _OCTEP_TX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d1eddb769a41..2ad73b180276 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -248,7 +248,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
- return -ENOSPC;
+ return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
@@ -407,7 +407,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
- return -ENOSPC;
+ return -ENOMEM;
/* Get the maximum width of a column */
lf_str_size = get_max_column_width(rvu);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 47c899c08951..3a141f2db812 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -35,6 +35,10 @@ struct prestera_acl_rule_entry {
u8 valid:1;
} accept, drop, trap;
struct {
+ u8 valid:1;
+ struct prestera_acl_action_police i;
+ } police;
+ struct {
struct prestera_acl_action_jump i;
u8 valid:1;
} jump;
@@ -421,13 +425,6 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
rule->re_arg.vtcam_id = ruleset->vtcam_id;
rule->re_key.prio = rule->priority;
- /* setup counter */
- rule->re_arg.count.valid = true;
- err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index,
- &rule->re_arg.count.client);
- if (err)
- goto err_rule_add;
-
rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
err = WARN_ON(rule->re) ? -EEXIST : 0;
if (err)
@@ -540,6 +537,12 @@ static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
act_num++;
}
+ /* police */
+ if (e->police.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE;
+ act_hw[act_num].police = e->police.i;
+ act_num++;
+ }
/* jump */
if (e->jump.valid) {
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP;
@@ -564,6 +567,9 @@ __prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw,
{
/* counter */
prestera_counter_put(sw->counter, e->counter.block, e->counter.id);
+ /* police */
+ if (e->police.valid)
+ prestera_hw_policer_release(sw, e->police.i.id);
}
void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
@@ -586,6 +592,8 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
struct prestera_acl_rule_entry *e,
struct prestera_acl_rule_entry_arg *arg)
{
+ int err;
+
/* accept */
e->accept.valid = arg->accept.valid;
/* drop */
@@ -595,10 +603,26 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
/* jump */
e->jump.valid = arg->jump.valid;
e->jump.i = arg->jump.i;
+ /* police */
+ if (arg->police.valid) {
+ u8 type = arg->police.ingress ? PRESTERA_POLICER_TYPE_INGRESS :
+ PRESTERA_POLICER_TYPE_EGRESS;
+
+ err = prestera_hw_policer_create(sw, type, &e->police.i.id);
+ if (err)
+ goto err_out;
+
+ err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id,
+ arg->police.rate,
+ arg->police.burst);
+ if (err) {
+ prestera_hw_policer_release(sw, e->police.i.id);
+ goto err_out;
+ }
+ e->police.valid = arg->police.valid;
+ }
/* counter */
if (arg->count.valid) {
- int err;
-
err = prestera_counter_get(sw->counter, arg->count.client,
&e->counter.block,
&e->counter.id);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 6d2ad27682d1..f963e1e0c0f0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -56,6 +56,7 @@ enum prestera_acl_rule_action {
PRESTERA_ACL_RULE_ACTION_TRAP = 2,
PRESTERA_ACL_RULE_ACTION_JUMP = 5,
PRESTERA_ACL_RULE_ACTION_COUNT = 7,
+ PRESTERA_ACL_RULE_ACTION_POLICE = 8,
PRESTERA_ACL_RULE_ACTION_MAX
};
@@ -74,6 +75,10 @@ struct prestera_acl_action_jump {
u32 index;
};
+struct prestera_acl_action_police {
+ u32 id;
+};
+
struct prestera_acl_action_count {
u32 id;
};
@@ -86,6 +91,7 @@ struct prestera_acl_rule_entry_key {
struct prestera_acl_hw_action_info {
enum prestera_acl_rule_action id;
union {
+ struct prestera_acl_action_police police;
struct prestera_acl_action_count count;
struct prestera_acl_action_jump jump;
};
@@ -107,6 +113,12 @@ struct prestera_acl_rule_entry_arg {
} jump;
struct {
u8 valid:1;
+ u64 rate;
+ u64 burst;
+ bool ingress;
+ } police;
+ struct {
+ u8 valid:1;
u32 client;
} count;
};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 921959a980ee..d43e503c644f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -70,6 +70,24 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
if (!flow_action_has_entries(flow_action))
return 0;
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
+ /* setup counter first */
+ rule->re_arg.count.valid = true;
+ err = prestera_acl_chain_to_client(chain_index,
+ &rule->re_arg.count.client);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
@@ -90,6 +108,16 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
rule->re_arg.trap.valid = 1;
break;
+ case FLOW_ACTION_POLICE:
+ if (rule->re_arg.police.valid)
+ return -EEXIST;
+
+ rule->re_arg.police.valid = 1;
+ rule->re_arg.police.rate =
+ act->police.rate_bytes_ps;
+ rule->re_arg.police.burst = act->police.burst;
+ rule->re_arg.police.ingress = true;
+ break;
case FLOW_ACTION_GOTO:
err = prestera_flower_parse_goto_action(block, rule,
chain_index,
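
FLOW_ACTION_POLICE hands the driver a rate in bytes per second and a burst size in bytes, which the code above stores in re_arg.police for the hardware policer. As a conceptual model only (the actual policing happens in the switch ASIC), a single-rate token bucket with a committed rate and burst behaves roughly like the sketch below; all names and numbers are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Conceptual single-rate token bucket: tokens refill at `rate` bytes/s
 * up to `burst` bytes; a packet conforms if enough tokens remain. This
 * illustrates the policing model, not the ASIC behaviour.
 */
struct bucket {
	uint64_t rate;     /* bytes per second (CIR) */
	uint64_t burst;    /* bucket depth in bytes (CBS) */
	uint64_t tokens;   /* current fill level */
	uint64_t last_ns;  /* last refill timestamp */
};

static bool bucket_conform(struct bucket *b, uint64_t now_ns, uint32_t pkt_len)
{
	uint64_t add = (now_ns - b->last_ns) * b->rate / 1000000000ull;

	b->tokens = b->tokens + add > b->burst ? b->burst : b->tokens + add;
	b->last_ns = now_ns;

	if (b->tokens < pkt_len)
		return false;      /* exceeds the profile: drop */

	b->tokens -= pkt_len;
	return true;               /* conforms: forward */
}

int main(void)
{
	struct bucket b = { .rate = 125000, .burst = 3000, .tokens = 3000 };
	uint64_t t = 0;

	for (int i = 0; i < 5; i++, t += 1000000)  /* packets 1 ms apart */
		printf("pkt %d: %s\n", i,
		       bucket_conform(&b, t, 1500) ? "green" : "red");
	return 0;
}
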
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index c66cc929c820..79fd3cac539d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -74,6 +74,10 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102,
PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+ PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500,
+ PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501,
+ PRESTERA_CMD_TYPE_POLICER_SET = 0x1502,
+
PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000,
PRESTERA_CMD_TYPE_ACK = 0x10000,
@@ -164,6 +168,10 @@ enum {
};
enum {
+ PRESTERA_POLICER_MODE_SR_TCM
+};
+
+enum {
PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0,
PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1,
PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2,
@@ -430,6 +438,9 @@ struct prestera_msg_acl_action {
} jump;
struct {
__le32 id;
+ } police;
+ struct {
+ __le32 id;
} count;
__le32 reserved[6];
};
@@ -570,6 +581,26 @@ struct mvsw_msg_cpu_code_counter_ret {
__le64 packet_count;
};
+struct prestera_msg_policer_req {
+ struct prestera_msg_cmd cmd;
+ __le32 id;
+ union {
+ struct {
+ __le64 cir;
+ __le32 cbs;
+ } __packed sr_tcm; /* make sure it is always 12 bytes in size */
+ __le32 reserved[6];
+ };
+ u8 mode;
+ u8 type;
+ u8 pad[2];
+};
+
+struct prestera_msg_policer_resp {
+ struct prestera_msg_ret ret;
+ __le32 id;
+};
+
struct prestera_msg_event {
__le16 type;
__le16 id;
@@ -622,6 +653,7 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
/* structure that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
@@ -640,6 +672,7 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
@@ -1192,6 +1225,9 @@ prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
case PRESTERA_ACL_RULE_ACTION_JUMP:
action->jump.index = __cpu_to_le32(info->jump.index);
break;
+ case PRESTERA_ACL_RULE_ACTION_POLICE:
+ action->police.id = __cpu_to_le32(info->police.id);
+ break;
case PRESTERA_ACL_RULE_ACTION_COUNT:
action->count.id = __cpu_to_le32(info->count.id);
break;
@@ -2163,3 +2199,48 @@ int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR,
&req.cmd, sizeof(req));
}
+
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id)
+{
+ struct prestera_msg_policer_resp resp;
+ struct prestera_msg_policer_req req = {
+ .type = type
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *policer_id = __le32_to_cpu(resp.id);
+ return 0;
+}
+
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id)
+{
+ struct prestera_msg_policer_req req = {
+ .id = __cpu_to_le32(policer_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs)
+{
+ struct prestera_msg_policer_req req = {
+ .mode = PRESTERA_POLICER_MODE_SR_TCM,
+ .id = __cpu_to_le32(policer_id),
+ .sr_tcm = {
+ .cir = __cpu_to_le64(cir),
+ .cbs = __cpu_to_le32(cbs)
+ }
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET,
+ &req.cmd, sizeof(req));
+}
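
The policer messages follow the same conventions as the other firmware requests above: fixed-size structures, explicit little-endian fields, trailing padding so the size stays stable, and a BUILD_BUG_ON() size check in prestera_hw_build_tests(). Below is a self-contained userspace sketch of that pattern, using a hand-rolled little-endian store and _Static_assert in place of the kernel's __cpu_to_le32() and BUILD_BUG_ON(); the layout is illustrative, not the driver's wire format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative wire format: fields are serialized as little-endian
 * bytes so the layout is independent of host endianness, and the
 * expected size is pinned at compile time.
 */
struct policer_req_demo {
	uint32_t id;
	uint64_t cir;
	uint32_t cbs;
	uint8_t  mode;
	uint8_t  type;
};

#define POLICER_REQ_WIRE_SIZE 20	/* id + cir + cbs + mode + type + pad */

_Static_assert(POLICER_REQ_WIRE_SIZE == 4 + 8 + 4 + 1 + 1 + 2,
	       "policer request wire size changed");

static void put_le32(uint8_t *p, uint32_t v)
{
	for (int i = 0; i < 4; i++)
		p[i] = v >> (8 * i);
}

static void put_le64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (8 * i);
}

static size_t policer_req_pack(const struct policer_req_demo *r, uint8_t *buf)
{
	uint8_t *p = buf;

	put_le32(p, r->id);  p += 4;
	put_le64(p, r->cir); p += 8;
	put_le32(p, r->cbs); p += 4;
	*p++ = r->mode;
	*p++ = r->type;
	memset(p, 0, 2);     p += 2;	/* keep the message size stable */
	return (size_t)(p - buf);
}

int main(void)
{
	struct policer_req_demo req = { .id = 7, .cir = 125000, .cbs = 3000 };
	uint8_t wire[POLICER_REQ_WIRE_SIZE];
	size_t len = policer_req_pack(&req, wire);

	printf("packed %zu bytes, id byte 0x%02x\n", len, wire[0]);
	return 0;
}
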
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index fd896a8838bb..579d9ba23ffc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -107,6 +107,11 @@ enum {
PRESTERA_STP_FORWARD,
};
+enum {
+ PRESTERA_POLICER_TYPE_INGRESS,
+ PRESTERA_POLICER_TYPE_EGRESS
+};
+
enum prestera_hw_cpu_code_cnt_t {
PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0,
PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
@@ -288,4 +293,12 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
enum prestera_hw_cpu_code_cnt_t counter_type,
u64 *packet_count);
+/* Policer API */
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id);
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id);
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs);
+
#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 6c5618cf4f08..3754d8aec76d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/inetdevice.h>
+#include <net/inet_dscp.h>
#include <net/switchdev.h>
#include <linux/rhashtable.h>
@@ -26,7 +27,7 @@ struct prestera_kern_fib_cache {
/* Indicate if route is not overlapped by another table */
struct rhash_head ht_node; /* node of prestera_router */
struct fib_info *fi;
- u8 kern_tos;
+ dscp_t kern_dscp;
u8 kern_type;
bool reachable;
};
@@ -88,7 +89,7 @@ prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_create(struct prestera_switch *sw,
struct prestera_kern_fib_cache_key *key,
- struct fib_info *fi, u8 tos, u8 type)
+ struct fib_info *fi, dscp_t dscp, u8 type)
{
struct prestera_kern_fib_cache *fib_cache;
int err;
@@ -100,7 +101,7 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw,
memcpy(&fib_cache->key, key, sizeof(*key));
fib_info_hold(fi);
fib_cache->fi = fi;
- fib_cache->kern_tos = tos;
+ fib_cache->kern_dscp = dscp;
fib_cache->kern_type = type;
err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
@@ -132,7 +133,7 @@ __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
fri.tb_id = fc->key.kern_tb_id;
fri.dst = fc->key.addr.u.ipv4;
fri.dst_len = fc->key.prefix_len;
- fri.tos = fc->kern_tos;
+ fri.dscp = fc->kern_dscp;
fri.type = fc->kern_type;
/* flags begin */
fri.offload = offload;
@@ -305,7 +306,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
if (replace) {
fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
fen_info->fi,
- fen_info->tos,
+ fen_info->dscp,
fen_info->type);
if (!fib_cache) {
dev_err(sw->dev->dev, "fib_cache == NULL");
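
The router cache above now stores the route's dscp_t instead of the raw TOS byte, following the fib_entry_notifier_info change. For reference, the IPv4 TOS/DS byte carries the 6-bit DSCP codepoint in its upper bits and the 2-bit ECN codepoint in its lower bits; the sketch below splits the two in plain C and is not the kernel's inet_dscp helper.

#include <stdint.h>
#include <stdio.h>

#define ECN_MASK  0x03	/* low two bits of the DS byte */

/* Split an IPv4 TOS/DS byte into its DSCP and ECN components.
 * Illustration only; the kernel keeps dscp_t as the DS byte with the
 * ECN bits cleared rather than as a shifted 6-bit value.
 */
static void split_dsfield(uint8_t dsfield, uint8_t *dscp, uint8_t *ecn)
{
	*dscp = dsfield >> 2;		/* 6-bit DSCP codepoint */
	*ecn = dsfield & ECN_MASK;	/* 2-bit ECN codepoint */
}

int main(void)
{
	uint8_t dscp, ecn;

	split_dsfield(0xb8, &dscp, &ecn);	/* EF (46) + Not-ECT */
	printf("dscp %u ecn %u\n", dscp, ecn);
	return 0;
}
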
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index cf03c67fbf40..c1e985416c0e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -50,7 +50,6 @@
#define PHY_RETRIES 1000
#define ETH_JUMBO_MTU 9000
#define TX_WATCHDOG (5 * HZ)
-#define NAPI_WEIGHT 64
#define BLINK_MS 250
#define LINK_HZ HZ
@@ -3833,7 +3832,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->features |= NETIF_F_HIGHDMA;
skge = netdev_priv(dev);
- netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &skge->napi, skge_poll, NAPI_POLL_WEIGHT);
skge->netdev = dev;
skge->hw = hw;
skge->msg_enable = netif_msg_init(debug, default_msg);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ea16b1dd6a98..a1e907c85217 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -63,7 +63,6 @@
#define TX_DEF_PENDING 63
#define TX_WATCHDOG (5 * HZ)
-#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
#define SKY2_EEPROM_MAGIC 0x9955aabb
@@ -4938,7 +4937,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_POLL_WEIGHT);
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 86d356b4388d..da4ec235d146 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
+config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 79d4cdbbcbf5..45ba0970504a 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,4 +5,9 @@
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f02d07ec5ccb..31c5da5d6b72 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
@@ -20,9 +21,11 @@
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/jhash.h>
+#include <linux/bitfield.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
@@ -786,7 +789,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
cnt * sizeof(struct mtk_tx_dma),
&eth->phy_scratch_ring,
GFP_ATOMIC);
@@ -798,10 +801,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(!eth->scratch_head))
return -ENOMEM;
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
phy_ring_tail = eth->phy_scratch_ring +
@@ -855,26 +858,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
} else {
if (dma_unmap_len(tx_buf, dma_len0)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
if (dma_unmap_len(tx_buf, dma_len1)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr1),
dma_unmap_len(tx_buf, dma_len1),
DMA_TO_DEVICE);
@@ -952,9 +955,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
- mapped_addr = dma_map_single(eth->dev, skb->data,
+ mapped_addr = dma_map_single(eth->dma_dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -993,10 +996,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
frag_map_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
goto err_dma;
if (i == nr_frags - 1 &&
@@ -1237,7 +1240,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
- u32 hash;
+ u32 hash, reason;
int mac;
ring = mtk_get_rx_ring(eth);
@@ -1274,18 +1277,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++;
goto release_desc;
}
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
new_data + NET_SKB_PAD +
eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
- dma_unmap_single(eth->dev, trxd.rxd1,
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
/* receive data */
@@ -1313,6 +1316,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
}
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe, skb,
+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
(trxd.rxd2 & RX_DMA_VTAG))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1558,7 +1566,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
@@ -1576,7 +1584,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys_pdma,
GFP_ATOMIC);
if (!ring->dma_pdma)
@@ -1635,7 +1643,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * sizeof(*ring->dma),
ring->dma,
ring->phys);
@@ -1643,7 +1651,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (ring->dma_pdma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
ring->dma_pdma,
ring->phys_pdma);
@@ -1688,18 +1696,18 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_alloc_coherent(eth->dev,
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
rx_dma_size * sizeof(*ring->dma),
&ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
ring->dma[i].rxd1 = (unsigned int)dma_addr;
@@ -1735,7 +1743,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
continue;
if (!ring->dma[i].rxd1)
continue;
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
ring->dma[i].rxd1,
ring->buf_size,
DMA_FROM_DEVICE);
@@ -1746,7 +1754,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
@@ -2099,7 +2107,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
eth->scratch_ring,
eth->phy_scratch_ring);
@@ -2267,7 +2275,7 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
+ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
gdm_config = MTK_GDMA_TO_PPE;
mtk_gdm_config(eth, gdm_config);
@@ -2341,7 +2349,7 @@ static int mtk_stop(struct net_device *dev)
mtk_dma_free(eth);
if (eth->soc->offload_version)
- mtk_ppe_stop(&eth->ppe);
+ mtk_ppe_stop(eth->ppe);
return 0;
}
@@ -2448,6 +2456,8 @@ static void mtk_dim_tx(struct work_struct *work)
static int mtk_hw_init(struct mtk_eth *eth)
{
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
int i, val, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2460,6 +2470,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
if (ret)
goto err_disable_pm;
+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
if (ret) {
@@ -3040,6 +3054,35 @@ free_netdev:
return err;
}
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
+
static int mtk_probe(struct platform_device *pdev)
{
struct device_node *mac_np;
@@ -3053,6 +3096,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
@@ -3101,6 +3145,16 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
+
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "cci-control-port");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
GFP_KERNEL);
@@ -3123,6 +3177,22 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ for (i = 0;; i++) {
+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ static const u32 wdma_regs[] = {
+ MTK_WDMA0_BASE,
+ MTK_WDMA1_BASE
+ };
+ void __iomem *wdma;
+
+ if (!np || i >= ARRAY_SIZE(wdma_regs))
+ break;
+
+ wdma = eth->base + wdma_regs[i];
+ mtk_wed_add_hw(np, eth, wdma, i);
+ }
+
for (i = 0; i < 3; i++) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
eth->irq[i] = eth->irq[0];
@@ -3198,10 +3268,11 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- err = mtk_ppe_init(&eth->ppe, eth->dev,
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
+ if (!eth->ppe) {
+ err = -ENOMEM;
goto err_free_dev;
+ }
err = mtk_eth_offload_init(eth);
if (err)
@@ -3227,9 +3298,9 @@ static int mtk_probe(struct platform_device *pdev)
*/
init_dummy_netdev(&eth->dummy_dev);
netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
- MTK_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
- MTK_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
platform_set_drvdata(pdev, eth);
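
The RX path above now extracts the PPE CPU reason from rxd4 with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask so callers only ever name the mask. A minimal userspace imitation of that idea follows; it is not the kernel macro (which also enforces constant masks and range-checks values), and the two field masks are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Simplified FIELD_PREP/FIELD_GET lookalikes: the shift is derived from
 * the mask (GCC/Clang __builtin_ctz), so callers only ever name the
 * mask. Illustration only.
 */
#define FIELD_SHIFT(mask)     (__builtin_ctz(mask))
#define FIELD_PREP_X(mask, v) (((uint32_t)(v) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET_X(mask, r)  (((r) & (mask)) >> FIELD_SHIFT(mask))

#define RX_REASON_MASK  0x001f0000u	/* hypothetical 5-bit "CPU reason" field */
#define RX_LENGTH_MASK  0x00003fffu	/* hypothetical 14-bit length field */

int main(void)
{
	uint32_t rxd = FIELD_PREP_X(RX_REASON_MASK, 0x14) |
		       FIELD_PREP_X(RX_LENGTH_MASK, 1500);

	printf("reason 0x%x len %u\n",
	       (unsigned int)FIELD_GET_X(RX_REASON_MASK, rxd),
	       (unsigned int)FIELD_GET_X(RX_LENGTH_MASK, rxd));
	return 0;
}
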
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index c9d42be314b5..b04977fa84f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -24,7 +24,6 @@
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_DMA_SIZE 512
-#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
@@ -295,6 +294,9 @@
#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
+#define MTK_WDMA0_BASE 0x2800
+#define MTK_WDMA1_BASE 0x2c00
+
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
#define TX_DMA_TSO BIT(28)
@@ -465,6 +467,12 @@
#define RSTCTRL_FE BIT(6)
#define RSTCTRL_PPE BIT(31)
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
+
/* SGMII subsystem config registers */
/* Register to auto-negotiation restart */
#define SGMSYS_PCS_CONTROL_1 0x0
@@ -882,6 +890,7 @@ struct mtk_sgmii {
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
+ * @dma_dev: The device pointer used for dma mapping/alloc
* @base: The mapped register i/o base
* @page_lock: Make sure that register operations are atomic
* @tx_irq_lock: Make sure that IRQ register operations are atomic
@@ -925,6 +934,7 @@ struct mtk_sgmii {
struct mtk_eth {
struct device *dev;
+ struct device *dma_dev;
void __iomem *base;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
@@ -974,7 +984,7 @@ struct mtk_eth {
u32 rx_dma_l4_valid;
int ip_align;
- struct mtk_ppe ppe;
+ struct mtk_ppe *ppe;
struct rhashtable flow_table;
};
@@ -1023,6 +1033,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 3ad10c793308..683f89f8e3b2 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
return ppe_m32(ppe, reg, val, 0);
}
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
int ret;
@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
u32 hash;
switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
- case MTK_PPE_PKT_TYPE_BRIDGE:
- hv1 = e->bridge.src_mac_lo;
- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
- hv2 = e->bridge.src_mac_hi >> 16;
- hv2 ^= e->bridge.dest_mac_lo;
- hv3 = e->bridge.dest_mac_hi;
- break;
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
+
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.l2;
@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
+
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.ib2;
@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
entry->ipv6.ports = ports_pad;
- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
entry->ipv6.ib2 = val;
l2 = &entry->ipv6.l2;
} else {
@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
return 0;
}
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+ return 0;
+}
+
static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_BIND);
+ dma_wmb();
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u16 timestamp;
+ u16 now;
+
+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+ if (timestamp > now)
+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
- u32 hash;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
+
+ hwe = &ppe->foe_table[cur->hash];
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry *hwe;
+ struct mtk_foe_entry foe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
+
+ if (entry->hash == 0xffff)
+ goto out;
+
+ hwe = &ppe->foe_table[entry->hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ if (!mtk_flow_entry_match(entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_foe_entry *hwe;
+ u16 timestamp;
+ timestamp = mtk_eth_timestamp(ppe->eth);
timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
- hash = mtk_ppe_hash_entry(entry);
hwe = &ppe->foe_table[hash];
- if (!mtk_foe_entry_usable(hwe)) {
- hwe++;
- hash++;
-
- if (!mtk_foe_entry_usable(hwe))
- return -ENOSPC;
- }
-
memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
wmb();
hwe->ib1 = entry->ib1;
@@ -362,32 +519,195 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
dma_wmb();
mtk_ppe_cache_clear(ppe);
+}
- return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(&entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+ GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = &ppe->foe_table[hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(&foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
}
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ struct hlist_head *head = &ppe->foe_flow[hash / 2];
+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ tag += 4;
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version)
{
+ struct device *dev = eth->dev;
struct mtk_foe_entry *foe;
+ struct mtk_ppe *ppe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
/* need to allocate a separate device, since the PPE DMA access is
* not coherent.
*/
ppe->base = base;
+ ppe->eth = eth;
ppe->dev = dev;
ppe->version = version;
foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
- return -ENOMEM;
+ return NULL;
ppe->foe_table = foe;
mtk_ppe_debugfs_init(ppe);
- return 0;
+ return ppe;
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -443,7 +763,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
- MTK_PPE_FLOW_CFG_L2_BRIDGE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
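
__mtk_foe_entry_idle_time() compares the bind timestamp stored in ib1 with the current hardware timestamp and corrects for counter wraparound, so an entry bound just before the counter rolls over still reports a small idle time. A plain-C sketch of the same wraparound-safe age computation, using an illustrative counter width:

#include <stdio.h>

#define TS_MASK 0x7fffu	/* illustrative counter width (mask = 2^n - 1) */

/* Age of an entry given a free-running counter that wraps at TS_MASK:
 * if the stored stamp is "ahead" of now, the counter wrapped since the
 * entry was bound, so one full period is added back.
 */
static unsigned int entry_age(unsigned int now, unsigned int stamp)
{
	now &= TS_MASK;
	stamp &= TS_MASK;

	if (stamp > now)
		return TS_MASK + 1 - stamp + now;
	return now - stamp;
}

int main(void)
{
	printf("%u\n", entry_age(100, 50));	/* 50 ticks old */
	printf("%u\n", entry_age(10, 0x7ffe));	/* wrapped: 12 ticks old */
	return 0;
}
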
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2ae65..1f5cf1c9a947 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/bitfield.h>
+#include <linux/rhashtable.h>
#define MTK_ETH_PPE_BASE 0xc00
@@ -48,9 +49,9 @@ enum {
#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
#define MTK_FOE_IB2_MULTICAST BIT(8)
-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
@@ -58,9 +59,9 @@ enum {
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
enum {
MTK_FOE_STATE_INVALID,
@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
u16 src_mac_lo;
};
+/* software-only entry type */
struct mtk_foe_bridge {
- u32 dest_mac_hi;
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 vlan;
- u16 src_mac_lo;
- u16 dest_mac_lo;
-
- u32 src_mac_hi;
+ struct {} key_end;
u32 ib2;
- u32 _rsv[5];
-
- u32 udf_tsid;
struct mtk_foe_mac_info l2;
};
@@ -235,7 +233,37 @@ enum {
MTK_PPE_CPU_REASON_INVALID = 0x1f,
};
+enum {
+ MTK_FLOW_TYPE_L4,
+ MTK_FLOW_TYPE_L2,
+ MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+ union {
+ struct hlist_node list;
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+ };
+ };
+ u8 type;
+ s8 wed_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+ struct {
+ struct mtk_flow_entry *base_flow;
+ struct hlist_node list;
+ struct {} end;
+ } l2_data;
+ };
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
struct mtk_ppe {
+ struct mtk_eth *eth;
struct device *dev;
void __iomem *base;
int version;
@@ -243,19 +271,35 @@ struct mtk_ppe {
struct mtk_foe_entry *foe_table;
dma_addr_t foe_phys;
+ u16 foe_check_time[MTK_PPE_ENTRIES];
+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
+ struct rhashtable l2_flows;
+
void *acct_table;
};
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
int mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
- ppe->foe_table[hash].ib1 = 0;
- dma_wmb();
+ u16 now, diff;
+
+ if (!ppe)
+ return;
+
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ __mtk_ppe_check_skb(ppe, skb, hash);
}
static inline int
@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp);
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
#endif
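
mtk_flow_l2_ht_params keys the L2 flow table on the leading members of struct mtk_foe_bridge by setting key_len to offsetof(struct mtk_foe_bridge, key_end): everything before the empty key_end marker is hashed and compared, everything after it is payload. The standalone illustration below uses a hypothetical structure and a plain memcmp() instead of rhashtable; the zero-sized struct member is a GNU C extension, as in the kernel.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* The members up to the zero-sized `key_end` marker form the lookup
 * key; `state` is payload and ignored by comparisons. Hypothetical
 * structure, not the driver's mtk_foe_bridge.
 */
struct l2_key_demo {
	unsigned char dest_mac[6];
	unsigned char src_mac[6];
	unsigned short vlan;
	struct {} key_end;	/* GNU C extension: zero-sized marker */

	int state;		/* not part of the key */
};

static int key_equal(const struct l2_key_demo *a, const struct l2_key_demo *b)
{
	return !memcmp(a, b, offsetof(struct l2_key_demo, key_end));
}

int main(void)
{
	struct l2_key_demo a = { .vlan = 100, .state = 1 };
	struct l2_key_demo b = { .vlan = 100, .state = 2 };

	printf("key_len=%zu equal=%d\n",
	       offsetof(struct l2_key_demo, key_end), key_equal(&a, &b));
	return 0;
}
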
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b482340cb9..eb0b598f14e4 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
static const char * const type_str[] = {
[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 7bb1f20002b5..1fe31058b0f2 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -6,10 +6,12 @@
#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
struct mtk_flow_data {
struct ethhdr eth;
@@ -19,11 +21,18 @@ struct mtk_flow_data {
__be32 src_addr;
__be32 dst_addr;
} v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
};
__be16 src_port;
__be16 dst_port;
+ u16 vlan_in;
+
struct {
u16 id;
__be16 proto;
@@ -35,12 +44,6 @@ struct mtk_flow_data {
} pppoe;
};
-struct mtk_flow_entry {
- struct rhash_head node;
- unsigned long cookie;
- u16 hash;
-};
-
static const struct rhashtable_params mtk_flow_ht_params = {
.head_offset = offsetof(struct mtk_flow_entry, node),
.key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -48,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
.automatic_shrinking = true,
};
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
bool egress)
@@ -63,6 +60,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
data->v4.dst_addr, data->dst_port);
}
+static int
+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
+{
+ return mtk_foe_entry_set_ipv6_tuple(foe,
+ data->v6.src_addr.s6_addr32, data->src_port,
+ data->v6.dst_addr.s6_addr32, data->dst_port);
+}
+
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
@@ -80,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+ struct net_device_path_ctx ctx = {
+ .dev = dev,
+ .daddr = addr,
+ };
+ struct net_device_path path = {};
+
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
+ if (!dev->netdev_ops->ndo_fill_forward_path)
+ return -1;
+
+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+ return -1;
+
+ if (path.type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->wdma_idx = path.mtk_wdma.wdma_idx;
+ info->queue = path.mtk_wdma.queue;
+ info->bss = path.mtk_wdma.bss;
+ info->wcid = path.mtk_wdma.wcid;
+
+ return 0;
+}
+
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -149,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev)
+ struct net_device *dev, const u8 *dest_mac,
+ int *wed_index)
{
+ struct mtk_wdma_info info = {};
int pse_port, dsa_port;
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+ info.wcid);
+ pse_port = 3;
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
+
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dsa_port >= 0)
mtk_foe_entry_set_dsa(foe, dsa_port);
@@ -164,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
else
return -EOPNOTSUPP;
+out:
mtk_foe_entry_set_pse_port(foe, pse_port);
return 0;
@@ -179,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
+ int wed_index = -1;
u16 addr_type = 0;
- u32 timestamp;
u8 l4proto = 0;
int err = 0;
- int hash;
int i;
if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -215,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return -EOPNOTSUPP;
}
+ switch (addr_type) {
+ case 0:
+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan_in = match.key->vlan_id;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_MANGLE:
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
mtk_flow_offload_mangle_eth(act, &data.eth);
break;
@@ -249,14 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
}
- switch (addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
if (!is_valid_ether_addr(data.eth.h_source) ||
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
@@ -270,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports ports;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
flow_rule_match_ports(rule, &ports);
data.src_port = ports.key->src;
data.dst_port = ports.key->dst;
- } else {
+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
return -EOPNOTSUPP;
}
@@ -288,10 +363,24 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
mtk_flow_set_ipv4_addr(&foe, &data, false);
}
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+
+ mtk_flow_set_ipv6_addr(&foe, &data);
+ }
+
flow_action_for_each(i, act, &rule->action) {
if (act->id != FLOW_ACTION_MANGLE)
continue;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
switch (act->mangle.htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -317,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return err;
}
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ foe.bridge.vlan = data.vlan_in;
+
if (data.vlan.num == 1) {
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
@@ -326,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
- err = mtk_flow_set_output_device(eth, &foe, odev);
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
if (err)
return err;
+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->cookie = f->cookie;
- timestamp = mtk_eth_timestamp(eth);
- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
- if (hash < 0) {
- err = hash;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+
+ if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
goto free;
- }
- entry->hash = hash;
err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (err < 0)
- goto clear_flow;
+ goto clear;
return 0;
-clear_flow:
- mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+ mtk_foe_entry_clear(eth->ppe, entry);
free:
kfree(entry);
+ if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);
return err;
}
@@ -366,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(&eth->ppe, entry->hash);
+ mtk_foe_entry_clear(eth->ppe, entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+ mtk_wed_flow_remove(entry->wed_index);
kfree(entry);
return 0;
@@ -378,7 +477,6 @@ static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
- int timestamp;
u32 idle;
entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -386,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
- if (timestamp < 0)
- return -ETIMEDOUT;
-
- idle = mtk_eth_timestamp(eth) - timestamp;
+ idle = mtk_foe_entry_idle_time(eth->ppe, entry);
f->stats.lastused = jiffies - idle * HZ;
return 0;
@@ -442,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -483,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- if (type == TC_SETUP_FT)
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
return mtk_eth_setup_tc_block(dev, type_data);
-
- return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
}
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
return 0;
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
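
The stats callback above now asks the PPE for the entry's idle time and converts it into a "last used" jiffies value with jiffies - idle * HZ, i.e. the idle time is treated as counting seconds. A trivial sketch of that conversion with an illustrative tick rate:

#include <stdio.h>

#define HZ_DEMO 100	/* illustrative tick rate */

/* Convert an idle time in seconds into a "last used" tick stamp, as the
 * stats callback above does with jiffies. Sketch only; the units follow
 * the driver's treatment of the PPE timestamp as seconds.
 */
static unsigned long last_used(unsigned long now_ticks, unsigned int idle_sec)
{
	return now_ticks - (unsigned long)idle_sec * HZ_DEMO;
}

int main(void)
{
	printf("%lu\n", last_used(100000, 30));	/* 97000 */
	return 0;
}
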
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 4cd0747edaff..95839fd84dab 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -30,7 +30,6 @@
#define MTK_STAR_WAIT_TIMEOUT 300
#define MTK_STAR_MAX_FRAME_SIZE 1514
#define MTK_STAR_SKB_ALIGNMENT 16
-#define MTK_STAR_NAPI_WEIGHT 64
#define MTK_STAR_HASHTABLE_MC_LIMIT 256
#define MTK_STAR_HASHTABLE_SIZE_MAX 512
@@ -1551,7 +1550,7 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);
return devm_register_netdev(dev, ndev);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
new file mode 100644
index 000000000000..8f0cd3196aac
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/debugfs.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed.h"
+#include "mtk_ppe.h"
+
+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+
+#define MTK_WED_TX_RING_SIZE 2048
+#define MTK_WED_WDMA_RING_SIZE 1024
+
+static struct mtk_wed_hw *hw_list[2];
+static DEFINE_MUTEX(hw_lock);
+
+static void
+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
+}
+
+static void
+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, 0, mask);
+}
+
+static void
+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, mask, 0);
+}
+
+static void
+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
+}
+
+static void
+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, 0, mask);
+}
+
+static u32
+mtk_wed_read_reset(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_RESET);
+}
+
+static void
+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 status;
+
+ wed_w32(dev, MTK_WED_RESET, mask);
+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
+ !(status & mask), 0, 1000))
+ WARN_ON_ONCE(1);
+}
+
+static struct mtk_wed_hw *
+mtk_wed_assign(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw;
+
+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+ if (!hw || hw->wed_dev)
+ return NULL;
+
+ hw->wed_dev = dev;
+ return hw;
+}
+
+static int
+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+ int i, page_idx;
+
+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->buf_ring.size = ring_size;
+ dev->buf_ring.pages = page_list;
+
+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->buf_ring.desc = desc;
+ dev->buf_ring.desc_phys = desc_phys;
+
+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+ int s;
+
+ page = __dev_alloc_pages(GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx++] = page;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf = page_to_virt(page);
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+ u32 txd_size;
+ u32 ctrl;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ desc++;
+
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
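
mtk_wed_buffer_alloc() above maps whole pages for DMA and carves each one into MTK_WED_BUF_PER_PAGE buffers of MTK_WED_BUF_SIZE bytes, describing every buffer with two segments: the TX descriptor written by wlan.init_buf() and the remaining packet space. The sketch below reproduces just that carving arithmetic in plain C; the page address and txd_size value are made up.

#include <stdio.h>

#define PAGE_SIZE_DEMO  4096u
#define BUF_SIZE_DEMO   2048u
#define BUFS_PER_PAGE   (PAGE_SIZE_DEMO / BUF_SIZE_DEMO)

/* Carve one DMA-mapped page into fixed-size buffers, each described by
 * two segments: the TX descriptor prefix and the remaining packet room.
 * page_phys and txd_size are stand-ins for the values the driver gets
 * from dma_map_page() and wlan.init_buf().
 */
struct demo_desc {
	unsigned int buf0, len0;	/* txd segment */
	unsigned int buf1, len1;	/* packet segment */
};

int main(void)
{
	unsigned int page_phys = 0x10000;
	unsigned int txd_size = 128;
	struct demo_desc desc[BUFS_PER_PAGE];

	for (unsigned int s = 0; s < BUFS_PER_PAGE; s++) {
		unsigned int buf_phys = page_phys + s * BUF_SIZE_DEMO;

		desc[s] = (struct demo_desc){
			.buf0 = buf_phys,
			.len0 = txd_size,
			.buf1 = buf_phys + txd_size,
			.len1 = BUF_SIZE_DEMO - txd_size,
		};
		printf("buf %u: txd @0x%x (%u), data @0x%x (%u)\n", s,
		       desc[s].buf0, desc[s].len0, desc[s].buf1, desc[s].len1);
	}
	return 0;
}
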
+
+static void
+mtk_wed_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
+ void **page_list = dev->buf_ring.pages;
+ int page_idx;
+ int i;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
+ void *page = page_list[page_idx++];
+ dma_addr_t buf_addr;
+
+ if (!page)
+ break;
+
+ buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
+ desc, dev->buf_ring.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
+}
+
+static void
+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
+{
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
+ ring->desc, ring->desc_phys);
+}
+
+static void
+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
+}
+
+static void
+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+{
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
+}
+
+static void
+mtk_wed_stop(struct mtk_wed_device *dev)
+{
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+ wed_clr(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+}
+
+static void
+mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
+ struct mtk_wed_hw *hw = dev->hw;
+
+ mutex_lock(&hw_lock);
+
+ mtk_wed_stop(dev);
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ mtk_wed_free_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+ if (of_dma_is_coherent(wlan_node))
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+
+ if (!hw_list[!hw->index]->wed_dev &&
+ hw->eth->dma_dev != hw->eth->dev)
+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
+
+ memset(dev, 0, sizeof(*dev));
+ module_put(THIS_MODULE);
+
+ hw->wed_dev = NULL;
+ mutex_unlock(&hw_lock);
+}
+
+static void
+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+{
+ u32 mask, set;
+ u32 offset;
+
+ mtk_wed_stop(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
+
+ offset = dev->hw->index ? 0x04000400 : 0;
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+}
+
+static void
+mtk_wed_hw_init(struct mtk_wed_device *dev)
+{
+ if (dev->init_done)
+ return;
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
+
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start + dev->wlan.nbuf - 1));
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+}
+
+static void
+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ desc[i].buf0 = 0;
+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc[i].buf1 = 0;
+ desc[i].info = 0;
+ }
+}
+
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev)
+{
+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+ return true;
+
+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_CTRL) &
+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
+ return true;
+
+ return false;
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev)
+{
+ int sleep = 15000;
+ int timeout = 100 * sleep;
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev);
+}
+
+static void
+mtk_wed_reset_dma(struct mtk_wed_device *dev)
+{
+ bool busy = false;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
+
+ if (!desc)
+ continue;
+
+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
+ }
+
+ if (mtk_wed_poll_busy(dev))
+ busy = mtk_wed_check_busy(dev);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+ wed_w32(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_TX |
+ MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+ }
+
+ for (i = 0; i < 100; i++) {
+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+ break;
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+ MTK_WED_WPDMA_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+ }
+}
+
+static int
+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev,
+ size * sizeof(*ring->desc),
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->size = size;
+ mtk_wed_ring_reset(ring->desc, size);
+
+ return 0;
+}
+
+static int
+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+{
+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+
+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+
+ return 0;
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ u32 wdma_mask;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ if (!dev->tx_wdma[i].desc)
+ mtk_wed_wdma_ring_setup(dev, i, 16);
+
+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+
+ mtk_wed_hw_init(dev);
+
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+
+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ mtk_wed_set_ext_int(dev, true);
+ val = dev->wlan.wpdma_phys |
+ MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
+
+ if (dev->hw->index)
+ val |= BIT(1);
+ val |= BIT(0);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+
+ dev->running = true;
+}
+
+static int
+mtk_wed_attach(struct mtk_wed_device *dev)
+ __releases(RCU)
+{
+ struct mtk_wed_hw *hw;
+ int ret = 0;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "mtk_wed_attach without holding the RCU read lock");
+
+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
+ !try_module_get(THIS_MODULE))
+ ret = -ENODEV;
+
+ rcu_read_unlock();
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&hw_lock);
+
+ hw = mtk_wed_assign(dev);
+ if (!hw) {
+ module_put(THIS_MODULE);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
+
+ dev->hw = hw;
+ dev->dev = hw->dev;
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+ mtk_eth_set_dma_device(hw->eth, hw->dev);
+
+ ret = mtk_wed_buffer_alloc(dev);
+ if (ret) {
+ mtk_wed_detach(dev);
+ goto out;
+ }
+
+ mtk_wed_hw_init_early(dev);
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+static int
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
+
+ /*
+ * Tx ring redirection:
+ * Instead of configuring the WLAN PDMA TX ring directly, the DMA ring
+ * allocated by the WLAN driver gets configured into the WED
+ * MTK_WED_RING_TX(n) registers.
+ *
+ * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and
+ * configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
+ * This ring gets filled with packets picked up from the WED TX ring
+ * and from WDMA RX.
+ */
+
+ BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
+
+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_TX_RING_SIZE);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ return 0;
+}
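
To make the redirection described above concrete, here is a minimal consumer-side sketch, not part of the patch: it assumes <linux/soc/mediatek/mtk_wed.h> is included, that "ops" was looked up from mtk_soc_wed_ops and that "wed" was already attached; the function name and the pdma_tx0_regs parameter are illustrative only.

/* Illustrative sketch, not part of the patch. */
static int example_redirect_tx_ring(const struct mtk_wed_ops *ops,
				    struct mtk_wed_device *wed,
				    void __iomem *pdma_tx0_regs)
{
	/*
	 * WED allocates its own descriptor ring, writes its base into the
	 * WLAN PDMA TX(0) register block passed in here and mirrors the
	 * same base/count into MTK_WED_WPDMA_RING_TX(0).
	 */
	return ops->tx_ring_setup(wed, 0, pdma_tx0_regs);
}
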
+
+static int
+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+ int i;
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+ * and WLAN. The WLAN driver accesses the ring index registers through
+ * WED.
+ */
+ ring->reg_base = MTK_WED_RING_RX(1);
+ ring->wpdma = regs;
+
+ for (i = 0; i < 12; i += 4) {
+ u32 val = readl(regs + i);
+
+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
+ }
+
+ return 0;
+}
+
+static u32
+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 val;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ if (!dev->hw->num_flows)
+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ if (val && net_ratelimit())
+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
+
+ val = wed_r32(dev, MTK_WED_INT_STATUS);
+ val &= mask;
+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
+
+ return val;
+}
+
+static void
+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+{
+ if (!dev->running)
+ return;
+
+ mtk_wed_set_ext_int(dev, !!mask);
+ wed_w32(dev, MTK_WED_INT_MASK, mask);
+}
+
+int mtk_wed_flow_add(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+ int ret;
+
+ if (!hw || !hw->wed_dev)
+ return -ENODEV;
+
+ if (hw->num_flows) {
+ hw->num_flows++;
+ return 0;
+ }
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
+ if (!ret)
+ hw->num_flows++;
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+void mtk_wed_flow_remove(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+
+ if (!hw)
+ return;
+
+ if (--hw->num_flows)
+ return;
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+}
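
The two exported helpers above are reference counted: the first successful mtk_wed_flow_add() call enables the WLAN driver's offload path, and the last matching mtk_wed_flow_remove() disables it again. A minimal balanced-usage sketch follows; it is not part of the patch, and install_hw_flow() is a hypothetical placeholder for whatever flow programming the caller actually performs.

/* Illustrative sketch, not part of the patch. */
static int example_offload_flow(int wed_index)
{
	int err;

	/* -ENODEV without an attached WED device; -EINVAL when WED support
	 * is compiled out (see the stub in mtk_wed.h).
	 */
	err = mtk_wed_flow_add(wed_index);
	if (err)
		return err;

	err = install_hw_flow();	/* hypothetical placeholder */
	if (err)
		mtk_wed_flow_remove(wed_index);

	return err;
}
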
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, int index)
+{
+ static const struct mtk_wed_ops wed_ops = {
+ .attach = mtk_wed_attach,
+ .tx_ring_setup = mtk_wed_tx_ring_setup,
+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
+ .start = mtk_wed_start,
+ .stop = mtk_wed_stop,
+ .reset_dma = mtk_wed_reset_dma,
+ .reg_read = wed_r32,
+ .reg_write = wed_w32,
+ .irq_get = mtk_wed_irq_get,
+ .irq_set_mask = mtk_wed_irq_set_mask,
+ .detach = mtk_wed_detach,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+ struct mtk_wed_hw *hw;
+ struct regmap *regs;
+ int irq;
+
+ if (!np)
+ return;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return;
+
+ get_device(&pdev->dev);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return;
+
+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(regs))
+ return;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
+
+ mutex_lock(&hw_lock);
+
+ if (WARN_ON(hw_list[index]))
+ goto unlock;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ goto unlock;
+ hw->node = np;
+ hw->regs = regs;
+ hw->eth = eth;
+ hw->dev = &pdev->dev;
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,hifsys");
+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+ kfree(hw);
+ goto unlock;
+ }
+
+ if (!index) {
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
+ mtk_wed_hw_add_debugfs(hw);
+
+ hw_list[index] = hw;
+
+unlock:
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_exit(void)
+{
+ int i;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
+
+ synchronize_rcu();
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw;
+
+ hw = hw_list[i];
+ if (!hw)
+ continue;
+
+ hw_list[i] = NULL;
+ debugfs_remove(hw->debugfs_dir);
+ put_device(hw->dev);
+ kfree(hw);
+ }
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..981ec613f4b0
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_PRIV_H
+#define __MTK_WED_PRIV_H
+
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <linux/debugfs.h>
+#include <linux/regmap.h>
+#include <linux/netdevice.h>
+
+struct mtk_eth;
+
+struct mtk_wed_hw {
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+ struct regmap *hifsys;
+ struct device *dev;
+ void __iomem *wdma;
+ struct regmap *mirror;
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ u32 debugfs_reg;
+ u32 num_flows;
+ char dirname[5];
+ int irq;
+ int index;
+};
+
+struct mtk_wdma_info {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static inline void
+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ regmap_write(dev->hw->regs, reg, val);
+}
+
+static inline u32
+wed_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(dev->hw->regs, reg, &val);
+
+ return val;
+}
+
+static inline void
+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->hw->wdma + reg);
+}
+
+static inline u32
+wdma_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->hw->wdma + reg);
+}
+
+static inline u32
+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ if (!dev->txfree_ring.wpdma)
+ return 0;
+
+ return readl(dev->txfree_ring.wpdma + reg);
+}
+
+static inline void
+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ if (!dev->txfree_ring.wpdma)
+ return;
+
+ writel(val, dev->txfree_ring.wpdma + reg);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, int index);
+void mtk_wed_exit(void);
+int mtk_wed_flow_add(int index);
+void mtk_wed_flow_remove(int index);
+#else
+static inline void
+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, int index)
+{
+}
+static inline void
+mtk_wed_exit(void)
+{
+}
+static inline int mtk_wed_flow_add(int index)
+{
+ return -EINVAL;
+}
+static inline void mtk_wed_flow_remove(int index)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
+#else
+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
new file mode 100644
index 000000000000..a81d3fd1a439
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/seq_file.h>
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+
+struct reg_dump {
+ const char *name;
+ u16 offset;
+ u8 type;
+ u8 base;
+};
+
+enum {
+ DUMP_TYPE_STRING,
+ DUMP_TYPE_WED,
+ DUMP_TYPE_WDMA,
+ DUMP_TYPE_WPDMA_TX,
+ DUMP_TYPE_WPDMA_TXFREE,
+};
+
+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
+
+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+
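
For readers following the macro layer, this note (not part of the patch) spells out what a single ring dump entry expands to:

/*
 * DUMP_WED_RING(WED_RING_TX(0)) goes through
 * DUMP_RING("WED_RING_TX(0)", MTK_WED_RING_TX(0), DUMP_TYPE_WED) and
 * therefore produces four struct reg_dump initializers:
 *
 *	{ "WED_RING_TX(0) BASE", 0x300, DUMP_TYPE_WED },
 *	{ "WED_RING_TX(0) CNT",  0x304, DUMP_TYPE_WED },
 *	{ "WED_RING_TX(0) CIDX", 0x308, DUMP_TYPE_WED },
 *	{ "WED_RING_TX(0) DIDX", 0x30c, DUMP_TYPE_WED },
 *
 * i.e. one entry per ring register at offsets 0x0/0x4/0x8/0xc from the
 * ring base, matching MTK_WED_RING_OFS_{BASE,COUNT,CPU_IDX,DMA_IDX} in
 * mtk_wed_regs.h.
 */
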
+static void
+print_reg_val(struct seq_file *s, const char *name, u32 val)
+{
+ seq_printf(s, "%-32s %08x\n", name, val);
+}
+
+static void
+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+ const struct reg_dump *regs, int n_regs)
+{
+ const struct reg_dump *cur;
+ u32 val;
+
+ for (cur = regs; cur < &regs[n_regs]; cur++) {
+ switch (cur->type) {
+ case DUMP_TYPE_STRING:
+ seq_printf(s, "%s======== %s:\n",
+ cur > regs ? "\n" : "",
+ cur->name);
+ continue;
+ case DUMP_TYPE_WED:
+ val = wed_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WDMA:
+ val = wdma_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TX:
+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TXFREE:
+ val = wpdma_txfree_r32(dev, cur->offset);
+ break;
+ }
+ print_reg_val(s, cur->name, val);
+ }
+}
+
+
+static int
+wed_txinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED TX"),
+ DUMP_WED(WED_TX_MIB(0)),
+ DUMP_WED_RING(WED_RING_TX(0)),
+
+ DUMP_WED(WED_TX_MIB(1)),
+ DUMP_WED_RING(WED_RING_TX(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
+
+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WPDMA_TX_RING(0),
+ DUMP_WPDMA_TX_RING(1),
+
+ DUMP_STR("WED WDMA RX"),
+ DUMP_WED(WED_WDMA_RX_MIB(0)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
+ DUMP_WED(WED_WDMA_RX_THRES(0)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
+
+ DUMP_WED(WED_WDMA_RX_MIB(1)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
+ DUMP_WED(WED_WDMA_RX_THRES(1)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
+
+ DUMP_STR("WDMA RX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev)
+ return 0;
+
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+
+
+static int
+mtk_wed_reg_set(void *data, u64 val)
+{
+ struct mtk_wed_hw *hw = data;
+
+ regmap_write(hw->regs, hw->debugfs_reg, val);
+
+ return 0;
+}
+
+static int
+mtk_wed_reg_get(void *data, u64 *val)
+{
+ struct mtk_wed_hw *hw = data;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
+ "0x%08llx\n");
+
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+ struct dentry *dir;
+
+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
+ dir = debugfs_create_dir(hw->dirname, NULL);
+ if (!dir)
+ return;
+
+ hw->debugfs_dir = dir;
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
new file mode 100644
index 000000000000..a5d9d8a5bce2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+
+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
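
A WLAN driver is expected to dereference this RCU-published ops pointer under the RCU read lock and then call ->attach(), which drops the lock itself (mtk_wed_attach() above is annotated __releases(RCU)). The sketch below is not part of the patch; the function name is illustrative and dev->wlan is assumed to have been filled in by the caller beforehand.

/* Illustrative sketch, not part of the patch. */
static int example_wed_attach(struct mtk_wed_device *wed)
{
	const struct mtk_wed_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(mtk_soc_wed_ops);
	if (!ops) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* mtk_wed_attach() releases the RCU read lock on every path */
	return ops->attach(wed);
}
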
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
new file mode 100644
index 000000000000..0a0465ea58b4
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_REGS_H
+#define __MTK_WED_REGS_H
+
+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
+struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
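
As a small illustration of how these descriptor fields are used (compare mtk_wed_buffer_alloc() and mtk_wed_ring_reset() in mtk_wed.c above), the helpers below compose and decode a ctrl word with the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. They are not part of the patch; the function names and the buf_size parameter are illustrative only.

/* Illustrative sketch, not part of the patch. */
static inline __le32 example_desc_ctrl(u32 txd_size, u32 buf_size)
{
	/* buf0 carries the TX descriptor, buf1 the remaining buffer space */
	return cpu_to_le32(FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
			   FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
				      buf_size - txd_size) |
			   MTK_WDMA_DESC_CTRL_LAST_SEG1);
}

static inline u32 example_desc_len0(const struct mtk_wdma_desc *desc)
{
	/* read back the buf0 length from the little-endian ctrl word */
	return FIELD_GET(MTK_WDMA_DESC_CTRL_LEN0, le32_to_cpu(desc->ctrl));
}
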
+#define MTK_WED_RESET 0x008
+#define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+#define MTK_WED_RESET_WED BIT(31)
+
+#define MTK_WED_CTRL 0x00c
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+
+#define MTK_WED_EXT_INT_STATUS 0x020
+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
+
+#define MTK_WED_EXT_INT_MASK 0x028
+
+#define MTK_WED_STATUS 0x060
+#define MTK_WED_STATUS_TX GENMASK(15, 8)
+
+#define MTK_WED_TX_BM_CTRL 0x080
+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_BM_BASE 0x084
+
+#define MTK_WED_TX_BM_TKID 0x088
+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+#define MTK_WED_TX_BM_BUF_LEN 0x08c
+
+#define MTK_WED_TX_BM_INTF 0x09c
+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
+
+#define MTK_WED_TX_BM_DYN_THR 0x0a0
+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
+
+#define MTK_WED_INT_STATUS 0x200
+#define MTK_WED_INT_MASK 0x204
+
+#define MTK_WED_GLO_CFG 0x208
+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MTK_WED_RESET_IDX 0x20c
+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+
+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
+
+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
+
+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+
+#define MTK_WED_WPDMA_GLO_CFG 0x508
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MTK_WED_WPDMA_RESET_IDX 0x50c
+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_INT_CTRL 0x520
+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+
+#define MTK_WED_WPDMA_INT_MASK 0x524
+
+#define MTK_WED_PCIE_CFG_BASE 0x560
+
+#define MTK_WED_PCIE_INT_TRIGGER 0x570
+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+#define MTK_WED_WPDMA_CFG_BASE 0x580
+
+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+
+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
+#define MTK_WED_WDMA_GLO_CFG 0xa04
+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
+
+#define MTK_WED_WDMA_RESET_IDX 0xa08
+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_CTRL 0xa2c
+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WDMA_OFFSET0 0xaa4
+#define MTK_WED_WDMA_OFFSET1 0xaa8
+
+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
+
+#define MTK_WED_RING_OFS_BASE 0x00
+#define MTK_WED_RING_OFS_COUNT 0x04
+#define MTK_WED_RING_OFS_CPU_IDX 0x08
+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
+
+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
+
+#define MTK_WDMA_GLO_CFG 0x204
+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
+
+#define MTK_WDMA_RESET_IDX 0x208
+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WDMA_INT_MASK 0x228
+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
+#define MTK_WDMA_INT_GRP1 0x250
+#define MTK_WDMA_INT_GRP2 0x254
+
+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+
+/* DMA channel mapping */
+#define HIFSYS_DMA_AG_MAP 0x008
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 4ba1a78c6515..bfc0cd5ec423 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -16,13 +16,9 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
-config MLX5_ACCEL
- bool
-
config MLX5_FPGA
bool "Mellanox Technologies Innova support"
depends on MLX5_CORE
- select MLX5_ACCEL
help
Build support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -143,71 +139,21 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
-config MLX5_FPGA_IPSEC
- bool "Mellanox Technologies IPsec Innova support"
- depends on MLX5_CORE
- depends on MLX5_FPGA
- help
- Build IPsec support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-
-config MLX5_IPSEC
- bool "Mellanox Technologies IPsec Connect-X support"
- depends on MLX5_CORE_EN
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- select MLX5_ACCEL
- help
- Build IPsec support for the Connect-X family of network cards by Mellanox
- Technologies.
- Note: If you select this option, the mlx5_core driver will include
- IPsec support for the Connect-X family.
-
config MLX5_EN_IPSEC
- bool "IPSec XFRM cryptography-offload acceleration"
+ bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
help
Build support for IPsec cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
-
-config MLX5_FPGA_TLS
- bool "Mellanox Technologies TLS Innova support"
- depends on TLS_DEVICE
- depends on TLS=y || MLX5_CORE=m
- depends on MLX5_CORE_EN
- depends on MLX5_FPGA
- select MLX5_EN_TLS
- help
- Build TLS support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-config MLX5_TLS
+config MLX5_EN_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- select MLX5_ACCEL
- select MLX5_EN_TLS
- help
- Build TLS support for the Connect-X family of network cards by Mellanox
- Technologies.
-
-config MLX5_EN_TLS
- bool
help
Build support for TLS cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 4bc666714a35..81620c25c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
#
# Netdev extra
@@ -88,17 +88,13 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
-mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
-mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
-
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o en_accel/ipsec_fs.o
+ en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
+ en_accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
deleted file mode 100644
index 82b185121edb..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __MLX5E_ACCEL_H__
-#define __MLX5E_ACCEL_H__
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
-{
- __be16 *ethtype;
-
- if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
- return false;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
- return false;
- return true;
-}
-
-static inline void remove_metadata_hdr(struct sk_buff *skb)
-{
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
-
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
-}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
deleted file mode 100644
index 09f5ce97af46..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/ipsec.h"
-#include "mlx5_core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec_offload.h"
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
- int err = 0;
-
- ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
- mlx5_ipsec_offload_ops(mdev) :
- mlx5_fpga_ipsec_ops(mdev);
-
- if (!ipsec_ops || !ipsec_ops->init) {
- mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
- return;
- }
-
- err = ipsec_ops->init(mdev);
- if (err) {
- mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
- return;
- }
-
- mdev->ipsec_ops = ipsec_ops;
-}
-
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->cleanup)
- return;
-
- ipsec_ops->cleanup(mdev);
-}
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->device_caps)
- return 0;
-
- return ipsec_ops->device_caps(mdev);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_count)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_count(mdev);
-}
-
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_read)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_read(mdev, counters, count);
-}
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- __be32 saddr[4] = {}, daddr[4] = {};
-
- if (!ipsec_ops || !ipsec_ops->create_hw_context)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (!xfrm->attrs.is_ipv6) {
- saddr[3] = xfrm->attrs.saddr.a4;
- daddr[3] = xfrm->attrs.daddr.a4;
- } else {
- memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
- memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
- }
-
- return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
- xfrm->attrs.is_ipv6, sa_handle);
-}
-
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->free_hw_context)
- return;
-
- ipsec_ops->free_hw_context(context);
-}
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- struct mlx5_accel_esp_xfrm *xfrm;
-
- if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
- return ERR_PTR(-EOPNOTSUPP);
-
- xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
- if (IS_ERR(xfrm))
- return xfrm;
-
- xfrm->mdev = mdev;
- return xfrm;
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
-
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
- return;
-
- ipsec_ops->esp_destroy_xfrm(xfrm);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
-
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
- return -EOPNOTSUPP;
-
- return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
deleted file mode 100644
index fbb9c5415d53..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_IPSEC_H__
-#define __MLX5_ACCEL_IPSEC_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/accel.h>
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count);
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle);
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_ipsec_ops {
- u32 (*device_caps)(struct mlx5_core_dev *mdev);
- unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
- int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
- void* (*create_hw_context)(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle);
- void (*free_hw_context)(void *context);
- int (*init)(struct mlx5_core_dev *mdev);
- void (*cleanup)(struct mlx5_core_dev *mdev);
- struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
- int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
- void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
-};
-
-#else
-
-#define MLX5_IPSEC_DEV(mdev) false
-
-static inline void *
-mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- return NULL;
-}
-
-static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
-
-static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
-
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
deleted file mode 100644
index d6667d38e1de..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
+++ /dev/null
@@ -1,385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#include "mlx5_core.h"
-#include "ipsec_offload.h"
-#include "lib/mlx5.h"
-#include "en_accel/ipsec_fs.h"
-
-#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
- MLX5_ACCEL_IPSEC_CAP_LSO)
-
-struct mlx5_ipsec_sa_ctx {
- struct rhash_head hash;
- u32 enc_key_id;
- u32 ipsec_obj_id;
- /* hw ctx */
- struct mlx5_core_dev *dev;
- struct mlx5_ipsec_esp_xfrm *mxfrm;
-};
-
-struct mlx5_ipsec_esp_xfrm {
- /* reference counter of SA ctx */
- struct mlx5_ipsec_sa_ctx *sa_ctx;
- struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */
- struct mlx5_accel_esp_xfrm accel_xfrm;
-};
-
-static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
-{
- u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
-
- if (!mlx5_is_ipsec_device(mdev))
- return 0;
-
- if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
- !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
- return 0;
-
- if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
- MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
- caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
-
- if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
- caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
- caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
-
- /* We can accommodate up to 2^24 different IPsec objects
- * because we use up to 24 bit in flow table metadata
- * to hold the IPsec Object unique handle.
- */
- WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
- return caps;
-}
-
-static int
-mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n",
- attrs->replay_type);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n",
- attrs->keymat_type);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n",
- attrs->keymat.aes_gcm.iv_algo);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n",
- attrs->keymat.aes_gcm.key_len);
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- !MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_ipsec_esp_xfrm *mxfrm;
- int err = 0;
-
- err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs);
- if (err)
- return ERR_PTR(err);
-
- mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL);
- if (!mxfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&mxfrm->lock);
- memcpy(&mxfrm->accel_xfrm.attrs, attrs,
- sizeof(mxfrm->accel_xfrm.attrs));
-
- return &mxfrm->accel_xfrm;
-}
-
-static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm,
- accel_xfrm);
-
- /* assuming no sa_ctx are connected to this xfrm_ctx */
- WARN_ON(mxfrm->sa_ctx);
- kfree(mxfrm);
-}
-
-struct mlx5_ipsec_obj_attrs {
- const struct aes_gcm_keymat *aes_gcm;
- u32 accel_flags;
- u32 esn_msb;
- u32 enc_key_id;
-};
-
-static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
- struct mlx5_ipsec_obj_attrs *attrs,
- u32 *ipsec_id)
-{
- const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
- u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
- void *obj, *salt_p, *salt_iv_p;
- int err;
-
- obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
-
- /* salt and seq_iv */
- salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
- memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
-
- switch (aes_gcm->icv_len) {
- case 64:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_8B);
- break;
- case 96:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_12B);
- break;
- case 128:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_16B);
- break;
- default:
- return -EINVAL;
- }
- salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
- memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
- /* esn */
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
- MLX5_SET(ipsec_obj, obj, esn_en, 1);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
- }
-
- MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id);
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
- MLX5_GENERAL_OBJECT_TYPES_IPSEC);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-
- return err;
-}
-
-static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id)
-{
- u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
-
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
- MLX5_GENERAL_OBJECT_TYPES_IPSEC);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
-
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *hw_handle)
-{
- struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs;
- struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
- struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
- struct mlx5_ipsec_esp_xfrm *mxfrm;
- struct mlx5_ipsec_sa_ctx *sa_ctx;
- int err;
-
- /* alloc SA context */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
- mutex_lock(&mxfrm->lock);
- sa_ctx->mxfrm = mxfrm;
-
- /* key */
- err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
- aes_gcm->key_len / BITS_PER_BYTE,
- MLX5_ACCEL_OBJ_IPSEC_KEY,
- &sa_ctx->enc_key_id);
- if (err) {
- mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
- goto err_sa_ctx;
- }
-
- ipsec_attrs.aes_gcm = aes_gcm;
- ipsec_attrs.accel_flags = accel_xfrm->attrs.flags;
- ipsec_attrs.esn_msb = accel_xfrm->attrs.esn;
- ipsec_attrs.enc_key_id = sa_ctx->enc_key_id;
- err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs,
- &sa_ctx->ipsec_obj_id);
- if (err) {
- mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
- goto err_enc_key;
- }
-
- *hw_handle = sa_ctx->ipsec_obj_id;
- mxfrm->sa_ctx = sa_ctx;
- mutex_unlock(&mxfrm->lock);
-
- return sa_ctx;
-
-err_enc_key:
- mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id);
-err_sa_ctx:
- mutex_unlock(&mxfrm->lock);
- kfree(sa_ctx);
- return ERR_PTR(err);
-}
-
-static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
-{
- struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context;
- struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm;
-
- mutex_lock(&mxfrm->lock);
- mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id);
- mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id);
- kfree(sa_ctx);
- mxfrm->sa_ctx = NULL;
- mutex_unlock(&mxfrm->lock);
-}
-
-static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
-static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
- struct mlx5_ipsec_obj_attrs *attrs,
- u32 ipsec_id)
-{
- u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
- u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
- u64 modify_field_select = 0;
- u64 general_obj_types;
- void *obj;
- int err;
-
- if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
- return 0;
-
- general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
- if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return -EINVAL;
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (err) {
- mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
- ipsec_id, err);
- return err;
- }
-
- obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
- modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
-
- /* esn */
- if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
- !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
- return -EOPNOTSUPP;
-
- obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
-
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
- struct mlx5_core_dev *mdev = xfrm->mdev;
- struct mlx5_ipsec_esp_xfrm *mxfrm;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs))
- return -EOPNOTSUPP;
-
- mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
-
- mutex_lock(&mxfrm->lock);
-
- if (!mxfrm->sa_ctx)
- /* Not bound xfrm, change only sw attrs */
- goto change_sw_xfrm_attrs;
-
- /* need to add find and replace in ipsec_rhash_sa the sa_ctx */
- /* modify device with new hw_sa */
- ipsec_attrs.accel_flags = attrs->flags;
- ipsec_attrs.esn_msb = attrs->esn;
- err = mlx5_modify_ipsec_obj(mdev,
- &ipsec_attrs,
- mxfrm->sa_ctx->ipsec_obj_id);
-
-change_sw_xfrm_attrs:
- if (!err)
- memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
-
- mutex_unlock(&mxfrm->lock);
- return err;
-}
-
-static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
- .device_caps = mlx5_ipsec_offload_device_caps,
- .create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
- .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
- .init = mlx5_ipsec_offload_init,
- .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
- .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
- .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
-};
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_ipsec_offload_device_caps(mdev))
- return NULL;
-
- return &ipsec_offload_ops;
-}
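For readers skimming the removed mlx5_create_ipsec_obj() above, the one step that is easy to misread is the ICV handling: the stack supplies the AES-GCM ICV length in bits, while the object takes a fixed-width selector. A minimal standalone sketch of that translation, using placeholder enum and function names rather than the mlx5 ones:

    enum icv_sel { ICV_SEL_8B, ICV_SEL_12B, ICV_SEL_16B };

    static int icv_bits_to_sel(unsigned int icv_bits, enum icv_sel *sel)
    {
            switch (icv_bits) {
            case 64:  *sel = ICV_SEL_8B;  break;   /* 8-byte ICV  */
            case 96:  *sel = ICV_SEL_12B; break;   /* 12-byte ICV */
            case 128: *sel = ICV_SEL_16B; break;   /* 16-byte ICV */
            default:  return -1;                   /* reject unsupported lengths */
            }
            return 0;
    }

Everything else in the removed function is command-header bookkeeping plus copying the salt, implicit IV and ESN state into the object before executing the create command.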
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
deleted file mode 100644
index 970c66d19c1d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_OFFLOAD_H__
-#define __MLX5_IPSEC_OFFLOAD_H__
-
-#include <linux/mlx5/driver.h>
-#include "accel/ipsec.h"
-
-#ifdef CONFIG_MLX5_IPSEC
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!MLX5_CAP_GEN(mdev, ipsec_offload))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return false;
-
- return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
- MLX5_CAP_ETH(mdev, insert_trailer);
-}
-
-#else
-static inline const struct mlx5_accel_ipsec_ops *
-mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- return false;
-}
-
-#endif /* CONFIG_MLX5_IPSEC */
-#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
deleted file mode 100644
index 6c2b86a26863..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/tls.h"
-#include "mlx5_core.h"
-#include "lib/mlx5.h"
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-#include "fpga/tls.h"
-
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid,
- direction_sx);
-}
-
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx)
-{
- mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
-}
-
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
-}
-
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_is_tls_device(mdev) ||
- mlx5_accel_is_ktls_device(mdev);
-}
-
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_device_caps(mdev);
-}
-
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_init(mdev);
-}
-
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- mlx5_fpga_tls_cleanup(mdev);
-}
-#endif
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
-{
- u32 sz_bytes;
- void *key;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- struct tls12_crypto_info_aes_gcm_128 *info =
- (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- case TLS_CIPHER_AES_GCM_256: {
- struct tls12_crypto_info_aes_gcm_256 *info =
- (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- default:
- return -EINVAL;
- }
-
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
-}
-
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
-{
- mlx5_destroy_encryption_key(mdev, key_id);
-}
-#endif
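The removed mlx5_ktls_create_key() reduces to picking the key buffer and its byte length per negotiated cipher before handing both to the encryption-key object. A self-contained sketch of that dispatch; the struct and enum names below are placeholders invented for illustration, not the kernel's tls structures:

    #include <stddef.h>

    enum tls_cipher { CIPHER_AES_GCM_128, CIPHER_AES_GCM_256 };

    struct aes_gcm_128_keys { unsigned char key[16]; };
    struct aes_gcm_256_keys { unsigned char key[32]; };

    static int pick_key(enum tls_cipher cipher, void *info,
                        void **key, size_t *sz_bytes)
    {
            switch (cipher) {
            case CIPHER_AES_GCM_128:
                    *key = ((struct aes_gcm_128_keys *)info)->key;
                    *sz_bytes = sizeof(((struct aes_gcm_128_keys *)info)->key);
                    return 0;
            case CIPHER_AES_GCM_256:
                    *key = ((struct aes_gcm_256_keys *)info)->key;
                    *sz_bytes = sizeof(((struct aes_gcm_256_keys *)info)->key);
                    return 0;
            default:
                    return -1;              /* unsupported cipher */
            }
    }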
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
deleted file mode 100644
index fd874f0c380a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_TLS_H__
-#define __MLX5_ACCEL_TLS_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/tls.h>
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
-
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_tx);
-}
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_rx);
-}
-
-static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_accel_is_ktls_tx(mdev) &&
- !mlx5_accel_is_ktls_rx(mdev))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
-}
-
-static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info)
-{
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (crypto_info->version == TLS_1_2_VERSION)
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
- break;
- }
-
- return false;
-}
-#else
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline int
-mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id) { return -ENOTSUPP; }
-static inline void
-mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
-
-static inline bool
-mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
-static inline bool
-mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info) { return false; }
-#endif
-
-enum {
- MLX5_ACCEL_TLS_TX = BIT(0),
- MLX5_ACCEL_TLS_RX = BIT(1),
- MLX5_ACCEL_TLS_V12 = BIT(2),
- MLX5_ACCEL_TLS_V13 = BIT(3),
- MLX5_ACCEL_TLS_LRO = BIT(4),
- MLX5_ACCEL_TLS_IPV6 = BIT(5),
- MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
- MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
-};
-
-struct mlx5_ifc_tls_flow_bits {
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 reserved_at_2[0x1e];
-};
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx);
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
-
-#else
-
-static inline int
-mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx) { return -ENOTSUPP; }
-static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx) { }
-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn) { return 0; }
-static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_accel_is_ktls_device(mdev);
-}
-static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
-static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
-#endif
-
-#endif /* __MLX5_ACCEL_TLS_H__ */
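The removed accel/tls.h follows the usual config-gated header idiom: real prototypes when the Kconfig option is enabled, static inline stubs otherwise, so callers never need their own #ifdefs. A generic sketch of the idiom; CONFIG_FEATURE_X, device_ctx and the function names are placeholders invented here:

    #include <stdbool.h>

    struct device_ctx;                            /* opaque to callers */

    #ifdef CONFIG_FEATURE_X
    int feature_x_init(struct device_ctx *ctx);   /* implemented in feature_x.c */
    bool feature_x_supported(struct device_ctx *ctx);
    #else
    static inline int feature_x_init(struct device_ctx *ctx) { return 0; }
    static inline bool feature_x_supported(struct device_ctx *ctx) { return false; }
    #endif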
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 057dde6f4417..e8789e6d7e7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -584,14 +584,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS)
- strcpy(value.vstr, "dmfs");
- else
- strcpy(value.vstr, "smfs");
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
- value);
-
value.vbool = MLX5_CAP_GEN(dev, roce);
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
@@ -602,18 +594,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
value);
-
- if (MLX5_ESWITCH_MANAGER(dev)) {
- if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
- dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
- value.vbool = true;
- } else {
- value.vbool = false;
- }
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
- value);
- }
#endif
value.vu32 = MLX5_COMP_EQ_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 7841ef6c193c..c5bb79a4fa57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -259,6 +259,9 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_PORT:
trace_seq_printf(p, "port\n");
break;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ trace_seq_printf(p, "none\n");
+ break;
}
trace_seq_putc(p, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8653ac0fd865..b90902db7819 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -354,7 +354,6 @@ enum {
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
- MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
@@ -649,8 +648,8 @@ typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
-(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
+(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index 9976de8b9047..b59aee75de94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -40,13 +40,11 @@ struct mlx5e_dcbx_dp {
};
void mlx5e_dcbnl_build_netdev(struct net_device *netdev);
-void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#else
static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {}
-static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {}
static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {}
static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {}
static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 678ffbb48a25..4130a871de61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -164,7 +164,6 @@ struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
- struct mlx5_flow_namespace *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 08fd1370a8b0..3c1edfa33aa7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -5,8 +5,7 @@
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec.h"
static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -207,7 +206,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
u16 stop_room;
- stop_room = mlx5e_tls_get_stop_room(mdev, params);
+ stop_room = mlx5e_ktls_get_stop_room(mdev, params);
stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
/* A MPWQE can take up to the maximum-sized WQE + all the normal
@@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
return false;
- if (mlx5_fpga_is_ipsec_device(mdev))
- return false;
-
if (params->xdp_prog) {
/* XSK params are not considered here. If striding RQ is in use,
* and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
@@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;
- if (mlx5_fpga_is_ipsec_device(mdev))
- byte_count += MLX5E_METADATA_ETHER_LEN;
-
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -696,8 +689,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- !!MLX5_IPSEC_DEV(mdev);
+ allow_swp =
+ mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -804,7 +797,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -833,7 +826,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
- param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
+ param->is_tls = mlx5e_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 59988e24b704..b979826f3f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -100,7 +100,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
- kfree(spec);
+ kvfree(spec);
if (!dr_matcher)
return ERR_PTR(-EINVAL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index fd4504518578..1cbd2eb9d04f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -93,6 +93,7 @@ sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample)
act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest.vport.num = esw->manager_vport;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1);
if (IS_ERR(tc_psample->termtbl_rule)) {
err = PTR_ERR(tc_psample->termtbl_rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index ab4b0f3ee2a0..228fbd2c20d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1808,7 +1808,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
if (!ct_flow) {
- kfree(ct_flow);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 021da085e603..9a1553598a7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -80,7 +80,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
@@ -99,11 +98,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
prog = rcu_dereference(rq->xdp_prog);
if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
return NULL; /* page/packet was consumed by XDP */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 7f88ccf67fdd..a8cfab4a393c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -15,7 +15,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 3ec0c17db010..4902ef74fedf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -23,7 +23,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
c = priv->channels.c[ix];
if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
/* To avoid WQE overrun, don't post a NOP if async_icosq is not
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 62cde3e87c2e..04c0a5e1c89a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -37,8 +37,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls.h"
+#include "en_accel/ktls_txrx.h"
#include "en.h"
#include "en/txrx.h"
@@ -124,8 +124,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
#ifdef CONFIG_MLX5_EN_TLS
/* May send SKBs and WQEs. */
- if (mlx5e_tls_skb_offloaded(skb))
- if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ if (mlx5e_ktls_skb_offloaded(skb))
+ if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
+ &state->tls)))
return false;
#endif
@@ -174,7 +175,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
+ mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4c4ee524176c..3ae6067c7e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -102,7 +102,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 299e3f0fcb5c..35e2bb301c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -37,23 +37,12 @@
#include <linux/netdevice.h>
#include "en.h"
-#include "en_accel/ipsec.h"
-#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/ipsec_fs.h"
+#include "ipsec.h"
+#include "ipsec_rxtx.h"
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
- struct mlx5e_ipsec_sa_entry *sa;
-
- if (!x)
- return NULL;
-
- sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
- if (!sa)
- return NULL;
-
- WARN_ON(sa->x != x);
- return sa;
+ return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
@@ -74,9 +63,9 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
return ret;
}
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
- unsigned int handle)
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ unsigned int handle = sa_entry->ipsec_obj_id;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *_sa_entry;
unsigned long flags;
@@ -148,7 +137,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
- struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
+ struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
struct aead_geniv_ctx *geniv_ctx;
struct crypto_aead *aead;
unsigned int crypto_data_len, key_len;
@@ -182,12 +171,6 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
}
- /* rx handle */
- attrs->sa_handle = sa_entry->handle;
-
- /* algo type */
- attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
-
/* action */
attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
MLX5_ACCEL_ESP_ACTION_ENCRYPT :
@@ -198,7 +181,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
/* spi */
- attrs->spi = x->id.spi;
+ attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
@@ -226,8 +209,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_ESN)) {
+ !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -274,46 +256,29 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
- if (x->props.family == AF_INET6 &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_IPV6)) {
- netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
- return -EINVAL;
- }
return 0;
}
-static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
-{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return 0;
-
- return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
- sa_entry->ipsec_obj_id,
- &sa_entry->ipsec_rule);
-}
-
-static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static void _update_xfrm_state(struct work_struct *work)
{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return;
+ struct mlx5e_ipsec_modify_state_work *modify_work =
+ container_of(work, struct mlx5e_ipsec_modify_state_work, work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
+ modify_work, struct mlx5e_ipsec_sa_entry, modify_work);
- mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
- &sa_entry->ipsec_rule);
+ mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.real_dev;
- struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
- unsigned int sa_handle;
int err;
priv = netdev_priv(netdev);
+ if (!priv->ipsec)
+ return -EOPNOTSUPP;
err = mlx5e_xfrm_validate_state(x);
if (err)
@@ -331,33 +296,18 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
- /* create xfrm */
- mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
- sa_entry->xfrm =
- mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
- if (IS_ERR(sa_entry->xfrm)) {
- err = PTR_ERR(sa_entry->xfrm);
- goto err_sa_entry;
- }
-
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
/* create hw context */
- sa_entry->hw_context =
- mlx5_accel_esp_create_hw_context(priv->mdev,
- sa_entry->xfrm,
- &sa_handle);
- if (IS_ERR(sa_entry->hw_context)) {
- err = PTR_ERR(sa_entry->hw_context);
+ err = mlx5_ipsec_create_sa_ctx(sa_entry);
+ if (err)
goto err_xfrm;
- }
- sa_entry->ipsec_obj_id = sa_handle;
- err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
+ err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
if (err)
goto err_hw_ctx;
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
+ err = mlx5e_ipsec_sadb_rx_add(sa_entry);
if (err)
goto err_add_rule;
} else {
@@ -365,18 +315,16 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
}
+ INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
err_add_rule:
- mlx5e_xfrm_fs_del_rule(priv, sa_entry);
+ mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
err_hw_ctx:
- mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
+ mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
- mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
-err_sa_entry:
kfree(sa_entry);
-
out:
return err;
}
@@ -385,9 +333,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- if (!sa_entry)
- return;
-
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
mlx5e_ipsec_sadb_rx_del(sa_entry);
}
@@ -397,24 +342,18 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
- if (!sa_entry)
- return;
-
- if (sa_entry->hw_context) {
- flush_workqueue(sa_entry->ipsec->wq);
- mlx5e_xfrm_fs_del_rule(priv, sa_entry);
- mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context);
- mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
- }
-
+ cancel_work_sync(&sa_entry->modify_work.work);
+ mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+ mlx5_ipsec_free_sa_ctx(sa_entry);
kfree(sa_entry);
}
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
- struct mlx5e_ipsec *ipsec = NULL;
+ struct mlx5e_ipsec *ipsec;
+ int ret;
- if (!MLX5_IPSEC_DEV(priv->mdev)) {
+ if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
return 0;
}
@@ -425,21 +364,27 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
- ida_init(&ipsec->halloc);
- ipsec->en_priv = priv;
- ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
+ ipsec->mdev = priv->mdev;
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
- kfree(ipsec);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_wq;
}
+ ret = mlx5e_accel_ipsec_fs_init(ipsec);
+ if (ret)
+ goto err_fs_init;
+
priv->ipsec = ipsec;
- mlx5e_accel_ipsec_fs_init(priv);
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
+
+err_fs_init:
+ destroy_workqueue(ipsec->wq);
+err_wq:
+ kfree(ipsec);
+ return (ret != -EOPNOTSUPP) ? ret : 0;
}
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
@@ -449,10 +394,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
- mlx5e_accel_ipsec_fs_cleanup(priv);
+ mlx5e_accel_ipsec_fs_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
-
- ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
}
@@ -472,50 +415,19 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
-struct mlx5e_ipsec_modify_state_work {
- struct work_struct work;
- struct mlx5_accel_esp_xfrm_attrs attrs;
- struct mlx5e_ipsec_sa_entry *sa_entry;
-};
-
-static void _update_xfrm_state(struct work_struct *work)
-{
- int ret;
- struct mlx5e_ipsec_modify_state_work *modify_work =
- container_of(work, struct mlx5e_ipsec_modify_state_work, work);
- struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;
-
- ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
- &modify_work->attrs);
- if (ret)
- netdev_warn(sa_entry->ipsec->en_priv->netdev,
- "Not an IPSec offload device\n");
-
- kfree(modify_work);
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5e_ipsec_modify_state_work *modify_work;
+ struct mlx5e_ipsec_modify_state_work *modify_work =
+ &sa_entry->modify_work;
bool need_update;
- if (!sa_entry)
- return;
-
need_update = mlx5e_ipsec_update_esn_state(sa_entry);
if (!need_update)
return;
- modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
- if (!modify_work)
- return;
-
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
- modify_work->sa_entry = sa_entry;
-
- INIT_WORK(&modify_work->work, _update_xfrm_state);
- WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
+ queue_work(sa_entry->ipsec->wq, &modify_work->work);
}
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
@@ -531,11 +443,8 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
- !MLX5_CAP_ETH(mdev, swp)) {
- mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
+ if (!mlx5_ipsec_device_caps(mdev))
return;
- }
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
@@ -550,15 +459,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
- !MLX5_CAP_ETH(mdev, swp_lso)) {
+ if (!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
}
- if (mlx5_is_ipsec_device(mdev))
- netdev->gso_partial_features |= NETIF_F_GSO_ESP;
-
+ netdev->gso_partial_features |= NETIF_F_GSO_ESP;
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 6164c7f59efb..16bcceec16c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -40,11 +40,56 @@
#include <net/xfrm.h>
#include <linux/idr.h>
-#include "accel/ipsec.h"
-
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
+enum mlx5_accel_esp_flags {
+ MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
+ MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
+ MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
+ MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
+};
+
+enum mlx5_accel_esp_action {
+ MLX5_ACCEL_ESP_ACTION_DECRYPT,
+ MLX5_ACCEL_ESP_ACTION_ENCRYPT,
+};
+
+struct aes_gcm_keymat {
+ u64 seq_iv;
+
+ u32 salt;
+ u32 icv_len;
+
+ u32 key_len;
+ u32 aes_key[256 / 32];
+};
+
+struct mlx5_accel_esp_xfrm_attrs {
+ enum mlx5_accel_esp_action action;
+ u32 esn;
+ u32 spi;
+ u32 flags;
+ struct aes_gcm_keymat aes_gcm;
+
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } saddr;
+
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } daddr;
+
+ u8 is_ipv6;
+};
+
+enum mlx5_ipsec_cap {
+ MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
+ MLX5_IPSEC_CAP_ESN = 1 << 1,
+};
+
struct mlx5e_priv;
struct mlx5e_ipsec_sw_stats {
@@ -55,37 +100,16 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
atomic64_t ipsec_tx_drop_trailer;
- atomic64_t ipsec_tx_drop_metadata;
-};
-
-struct mlx5e_ipsec_stats {
- u64 ipsec_dec_in_packets;
- u64 ipsec_dec_out_packets;
- u64 ipsec_dec_bypass_packets;
- u64 ipsec_enc_in_packets;
- u64 ipsec_enc_out_packets;
- u64 ipsec_enc_bypass_packets;
- u64 ipsec_dec_drop_packets;
- u64 ipsec_dec_auth_fail_packets;
- u64 ipsec_enc_drop_packets;
- u64 ipsec_add_sa_success;
- u64 ipsec_add_sa_fail;
- u64 ipsec_del_sa_success;
- u64 ipsec_del_sa_fail;
- u64 ipsec_cmd_drop;
};
struct mlx5e_accel_fs_esp;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
- struct mlx5e_priv *en_priv;
+ struct mlx5_core_dev *mdev;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
- bool no_trailer;
- spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
- struct ida halloc;
+ spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
- struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
struct mlx5e_ipsec_tx *tx_fs;
@@ -102,21 +126,26 @@ struct mlx5e_ipsec_rule {
struct mlx5_modify_hdr *set_modify_hdr;
};
+struct mlx5e_ipsec_modify_state_work {
+ struct work_struct work;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
+};
+
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
- struct mlx5_accel_esp_xfrm *xfrm;
- void *hw_context;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
u32 ipsec_obj_id;
+ u32 enc_key_id;
struct mlx5e_ipsec_rule ipsec_rule;
+ struct mlx5e_ipsec_modify_state_work modify_work;
};
-void mlx5e_ipsec_build_inverse_table(void);
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -124,12 +153,27 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
-#else
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ipsec_sa_entry *sa_entry);
+
+int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
-static inline void mlx5e_ipsec_build_inverse_table(void)
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
+
+void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs);
+
+static inline struct mlx5_core_dev *
+mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ return sa_entry->ipsec->mdev;
}
-
+#else
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
return 0;
@@ -143,6 +187,10 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
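The mlx5_accel_esp_xfrm_attrs layout added in this hunk keeps both address families in one union and tags the family with is_ipv6. A standalone illustration of filling such a layout; the struct below is a simplified stand-in with plain uint32_t fields, and fill_addr_pair() is invented for this sketch:

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    struct addr_pair {
            union { uint32_t a4; uint32_t a6[4]; } saddr;
            union { uint32_t a4; uint32_t a6[4]; } daddr;
            bool is_ipv6;
    };

    static void fill_addr_pair(struct addr_pair *p, const uint32_t *s,
                               const uint32_t *d, bool ipv6)
    {
            if (ipv6) {
                    memcpy(p->saddr.a6, s, sizeof(p->saddr.a6));
                    memcpy(p->daddr.a6, d, sizeof(p->daddr.a6));
            } else {
                    p->saddr.a4 = s[0];   /* IPv4 occupies only the first word */
                    p->daddr.a4 = d[0];
            }
            p->is_ipv6 = ipv6;
    }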
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 17da23dff0ed..8315e8f603d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2,8 +2,9 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
-#include "accel/ipsec_offload.h"
-#include "ipsec_fs.h"
+#include "en.h"
+#include "en/fs.h"
+#include "ipsec.h"
#include "fs_core.h"
#define NUM_IPSEC_FTE BIT(15)
@@ -35,6 +36,7 @@ struct mlx5e_accel_fs_esp {
};
struct mlx5e_ipsec_tx {
+ struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
struct mutex mutex; /* Protect IPsec TX steering */
u32 refcnt;
@@ -58,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
struct mlx5_flow_spec *spec;
- int err = 0;
+ int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -94,101 +96,27 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
goto out;
}
+ kvfree(spec);
rx_err->rule = fte;
rx_err->copy_modify_hdr = modify_hdr;
+ return 0;
out:
- if (err)
- mlx5_modify_header_dealloc(mdev, modify_hdr);
+ mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
kvfree(spec);
return err;
}
-static void rx_err_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_rx_err *rx_err)
-{
- if (rx_err->rule) {
- mlx5_del_flow_rules(rx_err->rule);
- rx_err->rule = NULL;
- }
-
- if (rx_err->copy_modify_hdr) {
- mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
- rx_err->copy_modify_hdr = NULL;
- }
-}
-
-static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
-{
- rx_err_del_rule(priv, rx_err);
-
- if (rx_err->ft) {
- mlx5_destroy_flow_table(rx_err->ft);
- rx_err->ft = NULL;
- }
-}
-
-static int rx_err_create_ft(struct mlx5e_priv *priv,
- struct mlx5e_accel_fs_esp_prot *fs_prot,
- struct mlx5e_ipsec_rx_err *rx_err)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_table *ft;
- int err;
-
- ft_attr.max_fte = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err);
- return err;
- }
-
- rx_err->ft = ft;
- err = rx_err_add_rule(priv, fs_prot, rx_err);
- if (err)
- goto out_err;
-
- return 0;
-
-out_err:
- mlx5_destroy_flow_table(ft);
- rx_err->ft = NULL;
- return err;
-}
-
-static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
-{
- if (fs_prot->miss_rule) {
- mlx5_del_flow_rules(fs_prot->miss_rule);
- fs_prot->miss_rule = NULL;
- }
-
- if (fs_prot->miss_group) {
- mlx5_destroy_flow_group(fs_prot->miss_group);
- fs_prot->miss_group = NULL;
- }
-
- if (fs_prot->ft) {
- mlx5_destroy_flow_table(fs_prot->ft);
- fs_prot->ft = NULL;
- }
-}
-
static int rx_fs_create(struct mlx5e_priv *priv,
struct mlx5e_accel_fs_esp_prot *fs_prot)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft = fs_prot->ft;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
- struct mlx5_flow_table *ft;
u32 *flow_group_in;
int err = 0;
@@ -199,20 +127,6 @@ static int rx_fs_create(struct mlx5e_priv *priv,
goto out;
}
- /* Create FT */
- ft_attr.max_fte = NUM_IPSEC_FTE;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft_attr.autogroup.num_reserved_entries = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err);
- goto out;
- }
- fs_prot->ft = ft;
-
/* Create miss_group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
@@ -227,19 +141,19 @@ static int rx_fs_create(struct mlx5e_priv *priv,
/* Create miss rule */
miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
if (IS_ERR(miss_rule)) {
+ mlx5_destroy_flow_group(fs_prot->miss_group);
err = PTR_ERR(miss_rule);
netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
goto out;
}
fs_prot->miss_rule = miss_rule;
-
out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
-static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -249,17 +163,21 @@ static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* The netdev unreg already happened, so all offloaded rule are already removed */
fs_prot = &accel_esp->fs_prot[type];
- rx_fs_destroy(fs_prot);
+ mlx5_del_flow_rules(fs_prot->miss_rule);
+ mlx5_destroy_flow_group(fs_prot->miss_group);
+ mlx5_destroy_flow_table(fs_prot->ft);
- rx_err_destroy_ft(priv, &fs_prot->rx_err);
-
- return 0;
+ mlx5_del_flow_rules(fs_prot->rx_err.rule);
+ mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+ mlx5_destroy_flow_table(fs_prot->rx_err.ft);
}
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5_flow_table *ft;
int err;
accel_esp = priv->ipsec->rx_fs;
@@ -268,14 +186,45 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
fs_prot->default_dest =
mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
- err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ fs_prot->rx_err.ft = ft;
+ err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
if (err)
- return err;
+ goto err_add;
+
+ /* Create FT */
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_fs_ft;
+ }
+ fs_prot->ft = ft;
err = rx_fs_create(priv, fs_prot);
if (err)
- rx_destroy(priv, type);
+ goto err_fs;
+ return 0;
+
+err_fs:
+ mlx5_destroy_flow_table(fs_prot->ft);
+err_fs_ft:
+ mlx5_del_flow_rules(fs_prot->rx_err.rule);
+ mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+err_add:
+ mlx5_destroy_flow_table(fs_prot->rx_err.ft);
return err;
}
@@ -289,21 +238,21 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
- if (fs_prot->refcnt++)
- goto out;
+ if (fs_prot->refcnt)
+ goto skip;
/* create FT */
err = rx_create(priv, type);
- if (err) {
- fs_prot->refcnt--;
+ if (err)
goto out;
- }
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+skip:
+ fs_prot->refcnt++;
out:
mutex_unlock(&fs_prot->prot_mutex);
return err;
@@ -317,7 +266,8 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
- if (--fs_prot->refcnt)
+ fs_prot->refcnt--;
+ if (fs_prot->refcnt)
goto out;
/* disconnect */
@@ -338,15 +288,9 @@ static int tx_create(struct mlx5e_priv *priv)
struct mlx5_flow_table *ft;
int err;
- priv->fs.egress_ns =
- mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
- if (!priv->fs.egress_ns)
- return -EOPNOTSUPP;
-
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
@@ -356,32 +300,20 @@ static int tx_create(struct mlx5e_priv *priv)
return 0;
}
-static void tx_destroy(struct mlx5e_priv *priv)
-{
- struct mlx5e_ipsec *ipsec = priv->ipsec;
-
- if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
- return;
-
- mlx5_destroy_flow_table(ipsec->tx_fs->ft);
- ipsec->tx_fs->ft = NULL;
-}
-
static int tx_ft_get(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
int err = 0;
mutex_lock(&tx_fs->mutex);
- if (tx_fs->refcnt++)
- goto out;
+ if (tx_fs->refcnt)
+ goto skip;
err = tx_create(priv);
- if (err) {
- tx_fs->refcnt--;
+ if (err)
goto out;
- }
-
+skip:
+ tx_fs->refcnt++;
out:
mutex_unlock(&tx_fs->mutex);
return err;
@@ -392,11 +324,11 @@ static void tx_ft_put(struct mlx5e_priv *priv)
struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
mutex_lock(&tx_fs->mutex);
- if (--tx_fs->refcnt)
+ tx_fs->refcnt--;
+ if (tx_fs->refcnt)
goto out;
- tx_destroy(priv);
-
+ mlx5_destroy_flow_table(tx_fs->ft);
out:
mutex_unlock(&tx_fs->mutex);
}
@@ -424,8 +356,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
/* SPI number */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
- be32_to_cpu(attrs->spi));
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.outer_esp_spi, attrs->spi);
if (ip_version == 4) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -458,11 +390,12 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
}
static int rx_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
struct mlx5_modify_hdr *modify_hdr = NULL;
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
@@ -536,9 +469,7 @@ out:
}
static int tx_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -555,7 +486,8 @@ static int tx_add_rule(struct mlx5e_priv *priv,
goto out;
}
- setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+ setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
+ &flow_act);
/* Add IPsec indicator in metadata_reg_a */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
@@ -570,11 +502,11 @@ static int tx_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
- attrs->action, err);
+ sa_entry->attrs.action, err);
goto out;
}
- ipsec_rule->rule = rule;
+ sa_entry->ipsec_rule.rule = rule;
out:
kvfree(spec);
@@ -583,133 +515,88 @@ out:
return err;
}
-static void rx_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule)
-{
- mlx5_del_flow_rules(ipsec_rule->rule);
- ipsec_rule->rule = NULL;
-
- mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
- ipsec_rule->set_modify_hdr = NULL;
-
- rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
-}
-
-static void tx_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_rule *ipsec_rule)
-{
- mlx5_del_flow_rules(ipsec_rule->rule);
- ipsec_rule->rule = NULL;
-
- tx_ft_put(priv);
-}
-
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!priv->ipsec->rx_fs)
- return -EOPNOTSUPP;
+ if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
+ return tx_add_rule(priv, sa_entry);
- if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
- else
- return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+ return rx_add_rule(priv, sa_entry);
}
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!priv->ipsec->rx_fs)
- return;
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
- if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- rx_del_rule(priv, attrs, ipsec_rule);
- else
- tx_del_rule(priv, ipsec_rule);
-}
+ mlx5_del_flow_rules(ipsec_rule->rule);
-static void fs_cleanup_tx(struct mlx5e_priv *priv)
-{
- mutex_destroy(&priv->ipsec->tx_fs->mutex);
- WARN_ON(priv->ipsec->tx_fs->refcnt);
- kfree(priv->ipsec->tx_fs);
- priv->ipsec->tx_fs = NULL;
+ if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
+ tx_ft_put(priv);
+ return;
+ }
+
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
+ rx_ft_put(priv,
+ sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}
-static void fs_cleanup_rx(struct mlx5e_priv *priv)
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
enum accel_fs_esp_type i;
- accel_esp = priv->ipsec->rx_fs;
+ if (!ipsec->rx_fs)
+ return;
+
+ mutex_destroy(&ipsec->tx_fs->mutex);
+ WARN_ON(ipsec->tx_fs->refcnt);
+ kfree(ipsec->tx_fs);
+
+ accel_esp = ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_destroy(&fs_prot->prot_mutex);
WARN_ON(fs_prot->refcnt);
}
- kfree(priv->ipsec->rx_fs);
- priv->ipsec->rx_fs = NULL;
-}
-
-static int fs_init_tx(struct mlx5e_priv *priv)
-{
- priv->ipsec->tx_fs =
- kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL);
- if (!priv->ipsec->tx_fs)
- return -ENOMEM;
-
- mutex_init(&priv->ipsec->tx_fs->mutex);
- return 0;
+ kfree(ipsec->rx_fs);
}
-static int fs_init_rx(struct mlx5e_priv *priv)
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5_flow_namespace *ns;
enum accel_fs_esp_type i;
+ int err = -ENOMEM;
+
+ ns = mlx5_get_flow_namespace(ipsec->mdev,
+ MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
+ if (!ns)
+ return -EOPNOTSUPP;
- priv->ipsec->rx_fs =
- kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL);
- if (!priv->ipsec->rx_fs)
+ ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
+ if (!ipsec->tx_fs)
return -ENOMEM;
- accel_esp = priv->ipsec->rx_fs;
+ ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
+ if (!ipsec->rx_fs)
+ goto err_rx;
+
+ mutex_init(&ipsec->tx_fs->mutex);
+ ipsec->tx_fs->ns = ns;
+
+ accel_esp = ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_init(&fs_prot->prot_mutex);
}
return 0;
-}
-
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
-{
- if (!priv->ipsec->rx_fs)
- return;
-
- fs_cleanup_tx(priv);
- fs_cleanup_rx(priv);
-}
-
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
-{
- int err;
-
- if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
- return -EOPNOTSUPP;
-
- err = fs_init_tx(priv);
- if (err)
- return err;
-
- err = fs_init_rx(priv);
- if (err)
- fs_cleanup_tx(priv);
+err_rx:
+ kfree(ipsec->tx_fs);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
index 3389b3bb3ef8..e4eeb2ba21c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
@@ -6,12 +6,11 @@
#include "en.h"
#include "ipsec.h"
-#include "accel/ipsec_offload.h"
+#include "ipsec_offload.h"
#include "en/fs.h"
-#ifdef CONFIG_MLX5_EN_IPSEC
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
@@ -19,8 +18,4 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule);
-#else
-static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
-#endif
#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
new file mode 100644
index 000000000000..792724ce7336
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "ipsec.h"
+#include "lib/mlx5.h"
+
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ u32 caps = 0;
+
+ if (!MLX5_CAP_GEN(mdev, ipsec_offload))
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return 0;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return 0;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
+ return 0;
+
+ if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
+ !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
+ return 0;
+
+ if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
+ MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
+ caps |= MLX5_IPSEC_CAP_CRYPTO;
+
+ if (!caps)
+ return 0;
+
+ if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
+ caps |= MLX5_IPSEC_CAP_ESN;
+
+ /* We can accommodate up to 2^24 different IPsec objects
+ * because we use up to 24 bits of flow table metadata
+ * to hold the IPsec Object unique handle.
+ */
+ WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
+ return caps;
+}
+EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
+
+static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
+ void *obj, *salt_p, *salt_iv_p;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
+
+ /* salt and seq_iv */
+ salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
+ memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
+
+ MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
+ salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
+ memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
+ /* esn */
+ if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+ MLX5_SET(ipsec_obj, obj, esn_en, 1);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
+ if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
+ MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+ }
+
+ MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ sa_entry->ipsec_obj_id =
+ MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ int err;
+
+ /* key */
+ err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
+ aes_gcm->key_len / BITS_PER_BYTE,
+ MLX5_ACCEL_OBJ_IPSEC_KEY,
+ &sa_entry->enc_key_id);
+ if (err) {
+ mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
+ return err;
+ }
+
+ err = mlx5_create_ipsec_obj(sa_entry);
+ if (err) {
+ mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
+ goto err_enc_key;
+ }
+
+ return 0;
+
+err_enc_key:
+ mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
+ return err;
+}
+
+void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+
+ mlx5_destroy_ipsec_obj(sa_entry);
+ mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
+}
+
+static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
+ u64 modify_field_select = 0;
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+ return 0;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return -EINVAL;
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
+ sa_entry->ipsec_obj_id, err);
+ return err;
+ }
+
+ obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
+ modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
+
+ /* esn */
+ if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
+ !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
+ return -EOPNOTSUPP;
+
+ obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
+ MLX5_SET64(ipsec_obj, obj, modify_field_select,
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
+ if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
+ MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ int err;
+
+ err = mlx5_modify_ipsec_obj(sa_entry, attrs);
+ if (err)
+ return;
+
+ memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
+}
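
Note that mlx5_ipsec_device_caps() at the top of the new ipsec_offload.c returns a capability bitmask rather than a boolean, and MLX5_IPSEC_CAP_ESN is only reported once MLX5_IPSEC_CAP_CRYPTO has been established. A minimal caller sketch, assuming the usual mlx5 driver headers and only the two MLX5_IPSEC_CAP_* flags introduced by this patch (illustrative code, not part of the patch):

static bool example_ipsec_esn_usable(struct mlx5_core_dev *mdev)
{
	u32 caps = mlx5_ipsec_device_caps(mdev);

	/* Zero means no IPsec offload at all; MLX5_IPSEC_CAP_ESN is only
	 * set on top of MLX5_IPSEC_CAP_CRYPTO, so one bit test suffices.
	 */
	return caps & MLX5_IPSEC_CAP_ESN;
}
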
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index b56fea142c24..6859f1c1a831 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -34,78 +34,15 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
-#include "accel/ipsec_offload.h"
-#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/ipsec.h"
-#include "accel/accel.h"
+#include "ipsec.h"
+#include "ipsec_rxtx.h"
#include "en.h"
enum {
- MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
- MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
- MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
-};
-
-struct mlx5e_ipsec_rx_metadata {
- unsigned char nexthdr;
- __be32 sa_handle;
-} __packed;
-
-enum {
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
-struct mlx5e_ipsec_tx_metadata {
- __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */
- __be16 seq; /* LSBs of the first TCP seq, only for LSO */
- u8 esp_next_proto; /* Next protocol of ESP */
-} __packed;
-
-struct mlx5e_ipsec_metadata {
- unsigned char syndrome;
- union {
- unsigned char raw[5];
- /* from FPGA to host, on successful decrypt */
- struct mlx5e_ipsec_rx_metadata rx;
- /* from host to FPGA */
- struct mlx5e_ipsec_tx_metadata tx;
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-#define MAX_LSO_MSS 2048
-
-/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
-static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
-
-static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
-{
- return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
-}
-
-static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *eth;
-
- if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
- return ERR_PTR(-ENOMEM);
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
- skb->mac_header -= sizeof(*mdata);
- mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(*mdata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
-
- memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
- return mdata;
-}
-
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
unsigned int alen = crypto_aead_authsize(x->data);
@@ -244,40 +181,6 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
skb_store_bits(skb, iv_offset, &seqno, 8);
}
-static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata,
- struct xfrm_offload *xo)
-{
- struct ip_esp_hdr *esph;
- struct tcphdr *tcph;
-
- if (skb_is_gso(skb)) {
- /* Add LSO metadata indication */
- esph = ip_esp_hdr(skb);
- tcph = inner_tcp_hdr(skb);
- netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
- skb->network_header,
- skb->transport_header,
- skb->inner_network_header,
- skb->inner_transport_header);
- netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
- skb->len, skb_shinfo(skb)->gso_size,
- ntohs(tcph->source), ntohs(tcph->dest),
- ntohl(tcph->seq), ntohl(esph->seq_no));
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
- mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
- mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
- } else {
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
- }
- mdata->content.tx.esp_next_proto = xo->proto;
-
- netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
- mdata->syndrome, mdata->content.tx.esp_next_proto,
- ntohs(mdata->content.tx.mss_inv),
- ntohs(mdata->content.tx.seq));
-}
-
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
struct mlx5e_accel_tx_ipsec_state *ipsec_st,
struct mlx5_wqe_inline_seg *inlseg)
@@ -298,16 +201,14 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
ipsec_st->x = x;
ipsec_st->xo = xo;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- aead = x->data;
- alen = crypto_aead_authsize(aead);
- blksize = ALIGN(crypto_aead_blocksize(aead), 4);
- clen = ALIGN(skb->len + 2, blksize);
- plen = max_t(u32, clen - skb->len, 4);
- tailen = plen + alen;
- ipsec_st->plen = plen;
- ipsec_st->tailen = tailen;
- }
+ aead = x->data;
+ alen = crypto_aead_authsize(aead);
+ blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ clen = ALIGN(skb->len + 2, blksize);
+ plen = max_t(u32, clen - skb->len, 4);
+ tailen = plen + alen;
+ ipsec_st->plen = plen;
+ ipsec_st->tailen = tailen;
return 0;
}
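
With the FPGA check gone, mlx5e_ipsec_set_state() always computes the ESP trailer size: the payload plus the two trailer bytes (pad length and next header) is rounded up to the cipher block size (itself aligned to at least 4), the resulting pad portion is kept at a minimum of 4 bytes, and the ICV size is added on top. A worked example of the same arithmetic, as a standalone sketch with assumed inputs (1000-byte payload, block size 1, 16-byte ICV):

#include <stdio.h>

/* Integer round-up; same result as the kernel ALIGN() for these inputs. */
#define ROUND_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int skb_len = 1000, alen = 16, blocksize = 1;	/* assumed example values */
	unsigned int blksize = ROUND_UP(blocksize, 4);		/* ALIGN(blocksize, 4) = 4 */
	unsigned int clen = ROUND_UP(skb_len + 2, blksize);	/* ALIGN(len + 2, blk) = 1004 */
	unsigned int plen = clen - skb_len > 4 ? clen - skb_len : 4;	/* max_t(u32, ..., 4) = 4 */

	printf("plen=%u tailen=%u\n", plen, plen + alen);	/* plen=4 tailen=20 */
	return 0;
}
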
@@ -340,19 +241,17 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
((struct iphdr *)skb_network_header(skb))->protocol :
((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
- eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
- encap = x->encap;
- if (!encap) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
- } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
- }
+ eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+ encap = x->encap;
+ if (!encap) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+ } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
}
}
@@ -363,7 +262,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_sa_entry *sa_entry;
- struct mlx5e_ipsec_metadata *mdata;
struct xfrm_state *x;
struct sec_path *sp;
@@ -392,19 +290,8 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
- if (MLX5_CAP_GEN(priv->mdev, fpga)) {
- mdata = mlx5e_ipsec_add_metadata(skb);
- if (IS_ERR(mdata)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
- goto drop;
- }
- }
-
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
- if (MLX5_CAP_GEN(priv->mdev, fpga))
- mlx5e_ipsec_set_metadata(skb, mdata, xo);
-
mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
return true;
@@ -414,79 +301,6 @@ drop:
return false;
}
-static inline struct xfrm_state *
-mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct xfrm_offload *xo;
- struct xfrm_state *xs;
- struct sec_path *sp;
- u32 sa_handle;
-
- sp = secpath_set(skb);
- if (unlikely(!sp)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
- return NULL;
- }
-
- sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
- xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
- if (unlikely(!xs)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
- return NULL;
- }
-
- sp = skb_sec_path(skb);
- sp->xvec[sp->len++] = xs;
- sp->olen++;
-
- xo = xfrm_offload(skb);
- xo->flags = CRYPTO_DONE;
- switch (mdata->syndrome) {
- case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- if (likely(priv->ipsec->no_trailer)) {
- xo->flags |= XFRM_ESP_NO_TRAILER;
- xo->proto = mdata->content.rx.nexthdr;
- }
- break;
- case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
- xo->status = CRYPTO_INVALID_PROTOCOL;
- break;
- default:
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
- return NULL;
- }
- return xs;
-}
-
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct xfrm_state *xs;
-
- if (!is_metadata_hdr_valid(skb))
- return skb;
-
- /* Use the metadata */
- mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
- xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
- if (unlikely(!xs)) {
- kfree_skb(skb);
- return NULL;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-
- return skb;
-}
-
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
@@ -518,7 +332,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
return;
}
- sp = skb_sec_path(skb);
sp->xvec[sp->len++] = xs;
sp->olen++;
@@ -528,8 +341,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
- if (WARN_ON_ONCE(priv->ipsec->no_trailer))
- xo->flags |= XFRM_ESP_NO_TRAILER;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
@@ -541,21 +352,3 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
-
-void mlx5e_ipsec_build_inverse_table(void)
-{
- u16 mss_inv;
- u32 mss;
-
- /* Calculate 1/x inverse table for use in GSO data path.
- * Using this table, we provide the IPSec accelerator with the value of
- * 1/gso_size so that it can infer the position of each segment inside
- * the GSO, and increment the ESP sequence number, and generate the IV.
- * The HW needs this value in Q0.16 fixed-point number format
- */
- mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
- for (mss = 2; mss < MAX_LSO_MSS; mss++) {
- mss_inv = div_u64(1ULL << 32, mss) >> 16;
- mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
- }
-}
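
The deleted mlx5e_ipsec_build_inverse_table() precomputed 1/MSS in Q0.16 fixed point for the legacy FPGA data path; the stored value is simply floor(2^16 / mss) in big-endian form, with mss = 1 pinned to 0xFFFF. A standalone sketch of the same arithmetic (hypothetical user-space code, shown only to illustrate the removed formula):

#include <stdint.h>
#include <stdio.h>

/* Q0.16 fixed-point 1/mss, matching the removed helper:
 * div_u64(1ULL << 32, mss) >> 16 == floor(2^16 / mss) for mss >= 2.
 */
static uint16_t q016_inverse(uint32_t mss)
{
	return (uint16_t)(((1ULL << 32) / mss) >> 16);
}

int main(void)
{
	/* Typical TCP MSS of 1448: 65536 / 1448 = 45 -> 0x002d */
	printf("0x%04x\n", q016_inverse(1448));
	return 0;
}
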
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 428881e0adcb..0ae4e12ce528 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -53,9 +53,6 @@ struct mlx5e_accel_tx_ipsec_state {
#ifdef CONFIG_MLX5_EN_IPSEC
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt);
-
void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 5cb936541b9e..9de84821dafb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -35,27 +35,7 @@
#include <net/sock.h>
#include "en.h"
-#include "accel/ipsec.h"
-#include "fpga/sdk.h"
-#include "en_accel/ipsec.h"
-#include "fpga/ipsec.h"
-
-static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
-};
+#include "ipsec.h"
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
@@ -65,13 +45,11 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
};
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -103,45 +81,4 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
-{
- return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
-{
- int ret = 0;
-
- if (priv->ipsec)
- ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
- NUM_IPSEC_HW_COUNTERS);
- if (ret)
- memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
-{
- unsigned int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
-
- return idx;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
-{
- int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
- mlx5e_ipsec_hw_stats_desc,
- i);
- return idx;
-}
-
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
-MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index d93aadbf10da..814f2a56f633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -2,11 +2,49 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
-#include "en_accel/tls.h"
+#include "lib/mlx5.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id)
+{
+ u32 sz_bytes;
+ void *key;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5_create_encryption_key(mdev, key, sz_bytes,
+ MLX5_ACCEL_OBJ_TLS_KEY,
+ p_key_id);
+}
+
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_destroy_encryption_key(mdev, key_id);
+}
+
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
struct tls_crypto_info *crypto_info,
@@ -59,15 +97,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev))
return;
- if (mlx5e_accel_is_ktls_tx(mdev)) {
+ if (mlx5e_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
@@ -92,7 +130,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
@@ -112,7 +150,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
@@ -120,3 +158,24 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
}
+
+int mlx5e_ktls_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls;
+
+ if (!mlx5e_is_ktls_device(priv->mdev))
+ return 0;
+
+ tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+ if (!tls)
+ return -ENOMEM;
+
+ priv->tls = tls;
+ return 0;
+}
+
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
+{
+ kfree(priv->tls);
+ priv->tls = NULL;
+}
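
The new mlx5_ktls_create_key()/mlx5_ktls_destroy_key() pair added earlier in this file hides the per-cipher key extraction behind tls_crypto_info and hands back only a key id; unsupported cipher types fail with -EINVAL. A hedged usage sketch, assuming kernel context (hypothetical helper, not taken from the patch):

/* Hypothetical pairing of the two helpers above; the caller keeps only
 * the key id and releases it with mlx5_ktls_destroy_key() on teardown.
 */
static int example_ktls_install_key(struct mlx5_core_dev *mdev,
				    struct tls_crypto_info *info,
				    u32 *key_id)
{
	int err;

	err = mlx5_ktls_create_key(mdev, info, key_id);
	if (err)
		return err;	/* e.g. -EINVAL for unsupported cipher types */

	/* ... use *key_id in the TX/RX offload context ... */
	return 0;
}
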
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 5833deb2354c..d016624fbc9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -4,9 +4,42 @@
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
+#include <linux/tls.h>
+#include <net/tls.h>
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id);
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
+{
+ if (is_kdump_kernel())
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+}
+
+static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info)
+{
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ break;
+ }
+
+ return false;
+}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
@@ -16,26 +49,36 @@ struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_tx(mdev);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_rx(mdev);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
}
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(mdev);
-}
+struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_ctx;
+ atomic64_t tx_tls_del;
+ atomic64_t rx_tls_ctx;
+ atomic64_t rx_tls_del;
+};
-#else
+struct mlx5e_tls {
+ struct mlx5e_tls_sw_stats sw_stats;
+ struct workqueue_struct *rx_wq;
+};
+int mlx5e_ktls_init(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
@@ -64,10 +107,23 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ return 0;
+}
+static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ return 0;
+}
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 96064a2033f7..0bb0633b7542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -3,7 +3,7 @@
#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 56e7b2aee85f..2ab46c4247ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -36,14 +36,7 @@
#include "en.h"
#include "fpga/sdk.h"
-#include "en_accel/tls.h"
-
-static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
-};
+#include "en_accel/ktls.h"
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
@@ -55,51 +48,43 @@ static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
-{
- if (!priv->tls)
- return NULL;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return mlx5e_ktls_sw_stats_desc;
- return mlx5e_tls_sw_stats_desc;
-}
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
- return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
+
+ return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- stats_desc[i].format);
+ mlx5e_ktls_sw_stats_desc[i].format);
return n;
}
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- data[idx++] =
- MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- stats_desc, i);
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_ktls_sw_stats_desc,
+ i);
return n;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index aaf11c66bf4c..4b6f0d1ea59a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -27,7 +27,7 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
{
u16 num_dumps, stop_room = 0;
- if (!mlx5e_accel_is_ktls_tx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev))
return 0;
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
@@ -448,14 +448,26 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct tls_context *tls_ctx;
+ int datalen;
u32 seq;
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ return true;
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ goto err_out;
+
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 08c9d5134479..2dd78dd4ad65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -16,8 +16,8 @@ struct mlx5e_accel_tx_tls_state {
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
@@ -48,6 +48,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
}
+
+static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
+static inline void
+mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
+}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -69,6 +81,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
return false;
}
+static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 *cqe_bcnt)
+{
+}
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
index e5c180f2403b..0dc715c4c10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
@@ -6,7 +6,6 @@
#include <net/tls.h>
#include "en.h"
-#include "accel/tls.h"
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
deleted file mode 100644
index b8fc863aa68d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/netdevice.h>
-#include <net/ipv6.h>
-#include "en_accel/tls.h"
-#include "accel/tls.h"
-
-static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 0);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
-{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 1);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-}
-#endif
-
-static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
- MLX5_FLD_SZ_BYTES(tls_flow, src_port));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
- MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
-}
-
-static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
-{
- switch (sk->sk_family) {
- case AF_INET:
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
-#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
- if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
- }
- if (!(caps & MLX5_ACCEL_TLS_IPV6))
- goto error_out;
-
- mlx5e_tls_set_ipv6_flow(flow, sk);
- break;
-#endif
- default:
- goto error_out;
- }
-
- mlx5e_tls_set_flow_tcp_ports(flow, sk);
- return 0;
-error_out:
- return -EINVAL;
-}
-
-static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 caps = mlx5_accel_tls_device_caps(mdev);
- int ret = -ENOMEM;
- void *flow;
- u32 swid;
-
- flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
- if (!flow)
- return ret;
-
- ret = mlx5e_tls_set_flow(flow, sk, caps);
- if (ret)
- goto free_flow;
-
- ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
- if (ret < 0)
- goto free_flow;
-
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context_tx *tx_ctx =
- mlx5e_get_tls_tx_context(tls_ctx);
-
- tx_ctx->swid = htonl(swid);
- tx_ctx->expected_seq = start_offload_tcp_sn;
- } else {
- struct mlx5e_tls_offload_context_rx *rx_ctx =
- mlx5e_get_tls_rx_context(tls_ctx);
-
- rx_ctx->handle = htonl(swid);
- }
-
- return 0;
-free_flow:
- kfree(flow);
- return ret;
-}
-
-static void mlx5e_tls_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- unsigned int handle;
-
- handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
- mlx5e_get_tls_tx_context(tls_ctx)->swid :
- mlx5e_get_tls_rx_context(tls_ctx)->handle);
-
- mlx5_accel_tls_del_flow(priv->mdev, handle,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
-}
-
-static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
- u32 seq, u8 *rcd_sn_data,
- enum tls_offload_ctx_dir direction)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_rx *rx_ctx;
- __be64 rcd_sn = *(__be64 *)rcd_sn_data;
-
- if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
- return -EINVAL;
- rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
-
- netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
- be64_to_cpu(rcd_sn));
- mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
-
- return 0;
-}
-
-static const struct tlsdev_ops mlx5e_tls_ops = {
- .tls_dev_add = mlx5e_tls_add,
- .tls_dev_del = mlx5e_tls_del,
- .tls_dev_resync = mlx5e_tls_resync,
-};
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- struct net_device *netdev = priv->netdev;
- u32 caps;
-
- if (mlx5e_accel_is_ktls_device(priv->mdev)) {
- mlx5e_ktls_build_netdev(priv);
- return;
- }
-
- /* FPGA */
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return;
-
- caps = mlx5_accel_tls_device_caps(priv->mdev);
- if (caps & MLX5_ACCEL_TLS_TX) {
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
- }
-
- if (caps & MLX5_ACCEL_TLS_RX) {
- netdev->features |= NETIF_F_HW_TLS_RX;
- netdev->hw_features |= NETIF_F_HW_TLS_RX;
- }
-
- if (!(caps & MLX5_ACCEL_TLS_LRO)) {
- netdev->features &= ~NETIF_F_LRO;
- netdev->hw_features &= ~NETIF_F_LRO;
- }
-
- netdev->tlsdev_ops = &mlx5e_tls_ops;
-}
-
-int mlx5e_tls_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls;
-
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- priv->tls = tls;
- return 0;
-}
-
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls = priv->tls;
-
- if (!tls)
- return;
-
- kfree(tls);
- priv->tls = NULL;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
deleted file mode 100644
index 62ecf14bf86a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#ifndef __MLX5E_TLS_H__
-#define __MLX5E_TLS_H__
-
-#include "accel/tls.h"
-#include "en_accel/ktls.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-#include <net/tls.h>
-#include "en.h"
-
-struct mlx5e_tls_sw_stats {
- atomic64_t tx_tls_ctx;
- atomic64_t tx_tls_del;
- atomic64_t tx_tls_drop_metadata;
- atomic64_t tx_tls_drop_resync_alloc;
- atomic64_t tx_tls_drop_no_sync_data;
- atomic64_t tx_tls_drop_bypass_required;
- atomic64_t rx_tls_ctx;
- atomic64_t rx_tls_del;
- atomic64_t rx_tls_drop_resync_request;
- atomic64_t rx_tls_resync_request;
- atomic64_t rx_tls_resync_reply;
- atomic64_t rx_tls_auth_fail;
-};
-
-struct mlx5e_tls {
- struct mlx5e_tls_sw_stats sw_stats;
- struct workqueue_struct *rx_wq;
-};
-
-struct mlx5e_tls_offload_context_tx {
- struct tls_offload_context_tx base;
- u32 expected_seq;
- __be32 swid;
-};
-
-static inline struct mlx5e_tls_offload_context_tx *
-mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
- return container_of(tls_offload_ctx_tx(tls_ctx),
- struct mlx5e_tls_offload_context_tx,
- base);
-}
-
-struct mlx5e_tls_offload_context_rx {
- struct tls_offload_context_rx base;
- __be32 handle;
-};
-
-static inline struct mlx5e_tls_offload_context_rx *
-mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
- return container_of(tls_offload_ctx_rx(tls_ctx),
- struct mlx5e_tls_offload_context_rx,
- base);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv)
-{
- return priv->tls;
-}
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
-int mlx5e_tls_init(struct mlx5e_priv *priv);
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv);
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
-
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() &&
- mlx5_accel_is_tls_device(mdev);
-}
-
-#else
-
-static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- if (!is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(priv->mdev))
- mlx5e_ktls_build_netdev(priv);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; }
-static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
-static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
-static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
-static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif
-
-#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
deleted file mode 100644
index a05580cea481..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
-#include "accel/accel.h"
-
-#include <net/inet6_hashtables.h>
-#include <linux/ipv6.h>
-
-#define SYNDROM_DECRYPTED 0x30
-#define SYNDROM_RESYNC_REQUEST 0x31
-#define SYNDROM_AUTH_FAILED 0x32
-
-#define SYNDROME_OFFLOAD_REQUIRED 32
-#define SYNDROME_SYNC 33
-
-struct sync_info {
- u64 rcd_sn;
- s32 sync_len;
- int nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
-};
-
-struct recv_metadata_content {
- u8 syndrome;
- u8 reserved;
- __be32 sync_seq;
-} __packed;
-
-struct send_metadata_content {
- /* One byte of syndrome followed by 3 bytes of swid */
- __be32 syndrome_swid;
- __be16 first_seq;
-} __packed;
-
-struct mlx5e_tls_metadata {
- union {
- /* from fpga to host */
- struct recv_metadata_content recv;
- /* from host to fpga */
- struct send_metadata_content send;
- unsigned char raw[6];
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
-{
- struct mlx5e_tls_metadata *pet;
- struct ethhdr *eth;
-
- if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
- return -ENOMEM;
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
- skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
- pet = (struct mlx5e_tls_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->content.send.syndrome_swid =
- htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
-
- return 0;
-}
-
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
- u32 tcp_seq, struct sync_info *info)
-{
- int remaining, i = 0, ret = -EINVAL;
- struct tls_record_info *record;
- unsigned long flags;
- s32 sync_size;
-
- spin_lock_irqsave(&context->base.lock, flags);
- record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
-
- if (unlikely(!record))
- goto out;
-
- sync_size = tcp_seq - tls_record_start_seq(record);
- info->sync_len = sync_size;
- if (unlikely(sync_size < 0)) {
- if (tls_record_is_start_marker(record))
- goto done;
-
- goto out;
- }
-
- remaining = sync_size;
- while (remaining > 0) {
- info->frags[i] = record->frags[i];
- __skb_frag_ref(&info->frags[i]);
- remaining -= skb_frag_size(&info->frags[i]);
-
- if (remaining < 0)
- skb_frag_size_add(&info->frags[i], remaining);
-
- i++;
- }
- info->nr_frags = i;
-done:
- ret = 0;
-out:
- spin_unlock_irqrestore(&context->base.lock, flags);
- return ret;
-}
-
-static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
- struct sk_buff *nskb, u32 tcp_seq,
- int headln, __be64 rcd_sn)
-{
- struct mlx5e_tls_metadata *pet;
- u8 syndrome = SYNDROME_SYNC;
- struct iphdr *iph;
- struct tcphdr *th;
- int data_len, mss;
-
- nskb->dev = skb->dev;
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb, skb_network_offset(skb));
- skb_set_transport_header(nskb, skb_transport_offset(skb));
- memcpy(nskb->data, skb->data, headln);
- memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
-
- iph = ip_hdr(nskb);
- iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
- th = tcp_hdr(nskb);
- data_len = nskb->len - headln;
- tcp_seq -= data_len;
- th->seq = htonl(tcp_seq);
-
- mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
- skb_shinfo(nskb)->gso_size = 0;
- if (data_len > mss) {
- skb_shinfo(nskb)->gso_size = mss;
- skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
- }
- skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
-
- pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
- memcpy(pet, &syndrome, sizeof(syndrome));
- pet->content.send.first_seq = htons(tcp_seq);
-
- /* MLX5 devices don't care about the checksum partial start, offset
- * and pseudo header
- */
- nskb->ip_summed = CHECKSUM_PARTIAL;
-
- nskb->queue_mapping = skb->queue_mapping;
-}
-
-static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tls *tls)
-{
- u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
- struct sync_info info;
- struct sk_buff *nskb;
- int linear_len = 0;
- int headln;
- int i;
-
- sq->stats->tls_ooo++;
-
- if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
- * It should be safe to drop the packet in this case
- */
- atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
- goto err_out;
- }
-
- if (unlikely(info.sync_len < 0)) {
- u32 payload;
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- payload = skb->len - headln;
- if (likely(payload <= -info.sync_len))
- /* SKB payload doesn't require offload
- */
- return true;
-
- atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
- goto err_out;
- }
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
- goto err_out;
- }
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- linear_len += headln + sizeof(info.rcd_sn);
- nskb = alloc_skb(linear_len, GFP_ATOMIC);
- if (unlikely(!nskb)) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
- goto err_out;
- }
-
- context->expected_seq = tcp_seq + skb->len - headln;
- skb_put(nskb, linear_len);
- for (i = 0; i < info.nr_frags; i++)
- skb_shinfo(nskb)->frags[i] = info.frags[i];
-
- skb_shinfo(nskb)->nr_frags = info.nr_frags;
- nskb->data_len = info.sync_len;
- nskb->len += info.sync_len;
- sq->stats->tls_resync_bytes += nskb->len;
- mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
- cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit_simple(sq, nskb, true);
-
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_tx *context;
- struct tls_context *tls_ctx;
- u32 expected_seq;
- int datalen;
- u32 skb_seq;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- return true;
-
- mlx5e_tx_mpwqe_ensure_complete(sq);
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
- if (mlx5e_accel_is_ktls_tx(sq->mdev))
- return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
-
- /* FPGA */
- skb_seq = ntohl(tcp_hdr(skb)->seq);
- context = mlx5e_get_tls_tx_context(tls_ctx);
- expected_seq = context->expected_seq;
-
- if (unlikely(expected_seq != skb_seq))
- return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
- dev_kfree_skb_any(skb);
- return false;
- }
-
- context->expected_seq = skb_seq + datalen;
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-static int tls_update_resync_sn(struct net_device *netdev,
- struct sk_buff *skb,
- struct mlx5e_tls_metadata *mdata)
-{
- struct sock *sk = NULL;
- struct iphdr *iph;
- struct tcphdr *th;
- __be32 seq;
-
- if (mdata->ethertype != htons(ETH_P_IP))
- return -EINVAL;
-
- iph = (struct iphdr *)(mdata + 1);
-
- th = ((void *)iph) + iph->ihl * 4;
-
- if (iph->version == 4) {
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
-
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
- &ipv6h->saddr, th->source,
- &ipv6h->daddr, ntohs(th->dest),
- netdev->ifindex, 0);
-#endif
- }
- if (!sk || sk->sk_state == TCP_TIME_WAIT) {
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
- goto out;
- }
-
- skb->sk = sk;
- skb->destructor = sock_edemux;
-
- memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
- tls_offload_rx_resync_request(sk, seq);
-out:
- return 0;
-}
-
-/* FPGA tls rx handler */
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt)
-{
- struct mlx5e_tls_metadata *mdata;
- struct mlx5e_priv *priv;
-
- /* Use the metadata */
- mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
- switch (mdata->content.recv.syndrome) {
- case SYNDROM_DECRYPTED:
- skb->decrypted = 1;
- break;
- case SYNDROM_RESYNC_REQUEST:
- tls_update_resync_sn(rq->netdev, skb, mdata);
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
- break;
- case SYNDROM_AUTH_FAILED:
- /* Authentication failure will be observed and verified by kTLS */
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
- break;
- default:
-		/* Leave the metadata header in place for other handlers */
- return;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-}
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- if (!mlx5e_accel_is_tls_device(mdev))
- return 0;
-
- if (mlx5e_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(mdev, params);
-
- /* FPGA */
- /* Resync SKB. */
- return mlx5e_stop_room_for_max_wqe(mdev);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
deleted file mode 100644
index 0ca0a023fb8d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5E_TLS_RXTX_H__
-#define __MLX5E_TLS_RXTX_H__
-
-#include "accel/accel.h"
-#include "en_accel/ktls_txrx.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-
-#include <linux/skbuff.h>
-#include "en.h"
-#include "en/txrx.h"
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
-
-static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
-{
- return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
-static inline void
-mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state)
-{
- cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
-}
-
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt);
-
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
-{
- if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */
- return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
-
- if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb)))
- return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
-}
-
-#else
-
-static inline bool
-mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- return 0;
-}
-
-#endif /* CONFIG_MLX5_EN_TLS */
-
-#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8ead2c82a52a..2449731b7d79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1026,15 +1026,6 @@ void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
}
-void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
-
- if (MLX5_CAP_GEN(mdev, qos))
- netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-}
-
static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
enum mlx5_dcbx_oper_mode *mode)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2f1dedc721d1..d27986869b8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -47,9 +47,7 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ktls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
@@ -68,7 +66,6 @@
#include "en/ptp.h"
#include "qos.h"
#include "en/trap.h"
-#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -1036,9 +1033,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
- __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
-
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
@@ -1334,7 +1328,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
- if (MLX5_IPSEC_DEV(c->priv->mdev))
+ if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
@@ -4471,12 +4465,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
- netdev_warn(netdev,
- "XDP is not available on Innova cards with IPsec support\n");
- return -EINVAL;
- }
-
new_params = priv->channels.params;
new_params.xdp_prog = prog;
@@ -4934,7 +4922,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_ipsec_build_netdev(priv);
- mlx5e_tls_build_netdev(priv);
+ mlx5e_ktls_build_netdev(priv);
}
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4996,7 +4984,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
- err = mlx5e_tls_init(priv);
+ err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5007,7 +4995,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
- mlx5e_tls_cleanup(priv);
+ mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv);
}
@@ -5704,7 +5692,6 @@ int mlx5e_init(void)
{
int ret;
- mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 6b7e7ea6ded2..47f7b4c034cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1112,7 +1112,6 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(ptp),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 56bb58704bf9..fb11081001a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -48,10 +48,9 @@
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@@ -1416,7 +1415,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;
- mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(get_cqe_tls_offload(cqe)))
+ mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
@@ -1521,8 +1521,8 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
}
static struct sk_buff *
-mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt)
{
struct mlx5e_dma_info *di = wi->di;
u16 rx_headroom = rq->buff.headroom;
@@ -1565,8 +1565,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
static struct sk_buff *
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
@@ -1709,7 +1709,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
+ rq, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1762,7 +1762,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
+ rq, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -2361,7 +2361,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
+ rq, wi, cqe_bcnt);
if (!skb)
goto wq_free_wqe;
@@ -2383,46 +2383,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
};
#endif /* CONFIG_MLX5_CORE_IPOIB */
-#ifdef CONFIG_MLX5_EN_IPSEC
-
-static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
- struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- struct mlx5e_wqe_frag_info *wi;
- struct sk_buff *skb;
- u32 cqe_bcnt;
- u16 ci;
-
- ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
- wi = get_frag(rq, ci);
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
-
- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- rq->stats->wqe_err++;
- goto wq_free_wqe;
- }
-
- skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
- mlx5e_skb_from_cqe_linear,
- mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
- if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
- goto wq_free_wqe;
-
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
- if (unlikely(!skb))
- goto wq_free_wqe;
-
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- napi_gro_receive(rq->cq.napi, skb);
-
-wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi, true);
- mlx5_wq_cyc_pop(wq);
-}
-
-#endif /* CONFIG_MLX5_EN_IPSEC */
-
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
struct net_device *netdev = rq->netdev;
@@ -2439,10 +2399,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
- return -EINVAL;
- }
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
if (!rq->handle_rx_cqe) {
@@ -2466,14 +2422,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-#ifdef CONFIG_MLX5_EN_IPSEC
- if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
- priv->ipsec)
- rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
- else
-#endif
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of RQ is not set\n");
return -EINVAL;
@@ -2504,7 +2453,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
goto free_wqe;
}
- skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
if (!skb)
goto free_wqe;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index bdc870f9c2f3..57fa0489eeb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,7 +32,7 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"
@@ -1900,17 +1900,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
- return mlx5e_tls_get_count(priv);
+ return mlx5e_ktls_get_count(priv);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
- return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+ return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
- return idx + mlx5e_tls_get_stats(priv, data + idx);
+ return idx + mlx5e_ktls_get_stats(priv, data + idx);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@@ -2443,7 +2443,6 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index a7a025d15c14..e48b15b55b6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -482,7 +482,6 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
-extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 458ec0bca1b8..25f2d2717aaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1582,6 +1582,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ if (MLX5_ESWITCH_MANAGER(dev) &&
+ mlx5_esw_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 2a984e82ae16..750c32050165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -57,9 +57,6 @@ struct mlx5_fpga_device {
u32 mkey;
struct mlx5_uars_page *uar;
} conn_res;
-
- struct mlx5_fpga_ipsec *ipsec;
- struct mlx5_fpga_tls *tls;
};
#define mlx5_fpga_dbg(__adev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
deleted file mode 100644
index 8ec148010d62..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/rhashtable.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/fs_helpers.h>
-#include <linux/mlx5/fs.h>
-#include <linux/rbtree.h>
-
-#include "mlx5_core.h"
-#include "fs_cmd.h"
-#include "fpga/ipsec.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-
-enum mlx5_fpga_ipsec_cmd_status {
- MLX5_FPGA_IPSEC_CMD_PENDING,
- MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
- MLX5_FPGA_IPSEC_CMD_COMPLETE,
-};
-
-struct mlx5_fpga_ipsec_cmd_context {
- struct mlx5_fpga_dma_buf buf;
- enum mlx5_fpga_ipsec_cmd_status status;
- struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
- int status_code;
- struct completion complete;
- struct mlx5_fpga_device *dev;
- struct list_head list; /* Item in pending_cmds */
- u8 command[];
-};
-
-struct mlx5_fpga_esp_xfrm;
-
-struct mlx5_fpga_ipsec_sa_ctx {
- struct rhash_head hash;
- struct mlx5_ifc_fpga_ipsec_sa hw_sa;
- u32 sa_handle;
- struct mlx5_core_dev *dev;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-};
-
-struct mlx5_fpga_esp_xfrm {
- unsigned int num_rules;
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mutex lock; /* xfrm lock */
- struct mlx5_accel_esp_xfrm accel_xfrm;
-};
-
-struct mlx5_fpga_ipsec_rule {
- struct rb_node node;
- struct fs_fte *fte;
- struct mlx5_fpga_ipsec_sa_ctx *ctx;
-};
-
-static const struct rhashtable_params rhash_sa = {
-	/* Keep the "cmd" field out of the key, as its
- * value is not constant during the lifetime
- * of the key object.
- */
- .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
- .automatic_shrinking = true,
- .min_size = 1,
-};
-
-struct mlx5_fpga_ipsec {
- struct mlx5_fpga_device *fdev;
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
- struct mlx5_fpga_conn *conn;
-
- struct notifier_block fs_notifier_ingress_bypass;
- struct notifier_block fs_notifier_egress;
-
- /* Map hardware SA --> SA context
- * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
-	 * We use this hash to avoid duplicate SAs in the FPGA,
-	 * which are not allowed
- */
- struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
- struct mutex sa_hash_lock;
-
- /* Tree holding all rules for this fpga device
- * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
- */
- struct rb_root rules_rb;
- struct mutex rules_rb_lock; /* rules lock */
-
- struct ida halloc;
-};
-
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
- return false;
-
- return true;
-}
-
-static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
-
- if (status) {
- context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
- buf);
- mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
- status);
- context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
- complete(&context->complete);
- }
-}
-
-static inline
-int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
-{
- switch (syndrome) {
- case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
- return 0;
- case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
- return -EEXIST;
- case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
- return -EINVAL;
- case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
- return -EIO;
- }
- return -EIO;
-}
-
-static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
- struct mlx5_fpga_ipsec_cmd_context *context;
- enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
- struct mlx5_fpga_device *fdev = cb_arg;
- unsigned long flags;
-
- if (buf->sg[0].size < sizeof(*resp)) {
- mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
- buf->sg[0].size, sizeof(*resp));
- return;
- }
-
- mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
- ntohl(resp->syndrome));
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
- struct mlx5_fpga_ipsec_cmd_context,
- list);
- if (context)
- list_del(&context->list);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (!context) {
- mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
- return;
- }
- mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
-
- syndrome = ntohl(resp->syndrome);
- context->status_code = syndrome_to_errno(syndrome);
- context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
- memcpy(&context->resp, resp, sizeof(*resp));
-
- if (context->status_code)
- mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
- syndrome);
-
- complete(&context->complete);
-}
-
-static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
- const void *cmd, int cmd_size)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned long flags;
- int res;
-
- if (!fdev || !fdev->ipsec)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (cmd_size & 3)
- return ERR_PTR(-EINVAL);
-
- context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
- context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
- context->dev = fdev;
- context->buf.complete = mlx5_fpga_ipsec_send_complete;
- init_completion(&context->complete);
- memcpy(&context->command, cmd, cmd_size);
- context->buf.sg[0].size = cmd_size;
- context->buf.sg[0].data = &context->command;
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
- if (!res)
- list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (res) {
- mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
- kfree(context);
- return ERR_PTR(res);
- }
-
- /* Context should be freed by the caller after completion. */
- return context;
-}
-
-static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
-{
- struct mlx5_fpga_ipsec_cmd_context *context = ctx;
- unsigned long timeout =
- msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
- int res;
-
- res = wait_for_completion_timeout(&context->complete, timeout);
- if (!res) {
- mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
- return -ETIMEDOUT;
- }
-
- if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
- res = context->status_code;
- else
- res = -EIO;
-
- return res;
-}
-
-static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
-{
- if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
- return true;
- return false;
-}
-
-static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
- int opcode)
-{
- struct mlx5_core_dev *dev = fdev->mdev;
- struct mlx5_ifc_fpga_ipsec_sa *sa;
- struct mlx5_fpga_ipsec_cmd_context *cmd_context;
- size_t sa_cmd_size;
- int err;
-
- hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
- if (is_v2_sadb_supported(fdev->ipsec))
- sa_cmd_size = sizeof(*hw_sa);
- else
- sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
-
- cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
- mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
- if (IS_ERR(cmd_context))
- return PTR_ERR(cmd_context);
-
- err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
- if (err)
- goto out;
-
- sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
- if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
- mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
- ntohl(sa->ipsec_sa_v1.sw_sa_handle),
- ntohl(cmd_context->resp.sw_sa_handle));
- err = -EIO;
- }
-
-out:
- kfree(cmd_context);
- return err;
-}
-
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- u32 ret = 0;
-
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
- ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
- } else {
- return ret;
- }
-
- if (!fdev->ipsec)
- return ret;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
- ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
- ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
- ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
- ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
- ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
-
- return ret;
-}
-
-static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- number_of_ipsec_counters);
-}
-
-static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int counters_count)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned int i;
- __be32 *data;
- u32 count;
- u64 addr;
- int ret;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_low) +
- ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_high) << 32);
-
- count = mlx5_fpga_ipsec_counters_count(mdev);
-
- data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
- MLX5_FPGA_ACCESS_TYPE_DONTCARE);
- if (ret < 0) {
- mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
- ret);
- goto out;
- }
- ret = 0;
-
- if (count > counters_count)
- count = counters_count;
-
- /* Each counter is low word, then high. But each word is big-endian */
- for (i = 0; i < count; i++)
- counters[i] = (u64)ntohl(data[i * 2]) |
- ((u64)ntohl(data[i * 2 + 1]) << 32);
-
-out:
- kfree(data);
- return ret;
-}
-
-static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
- int err;
-
- cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
- cmd.flags = htonl(flags);
- context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context))
- return PTR_ERR(context);
-
- err = mlx5_fpga_ipsec_cmd_wait(context);
- if (err)
- goto out;
-
- if ((context->resp.flags & cmd.flags) != cmd.flags) {
- mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
- cmd.flags,
- context->resp.flags);
- err = -EIO;
- }
-
-out:
- kfree(context);
- return err;
-}
-
-static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
-{
- u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
- u32 flags = 0;
-
- if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
- flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
-
- return mlx5_fpga_ipsec_set_caps(mdev, flags);
-}
-
-static void
-mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
-
- /* key */
- memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
- aes_gcm->key_len / 8);
- /* Duplicate 128 bit key twice according to HW layout */
- if (aes_gcm->key_len == 128)
- memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
- aes_gcm->aes_key, aes_gcm->key_len / 8);
-
- /* salt and seq_iv */
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
- sizeof(aes_gcm->seq_iv));
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
- sizeof(aes_gcm->salt));
-
- /* esn */
- if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags |=
- (xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = htonl(xfrm_attrs->esn);
- } else {
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags &=
- ~(xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = 0;
- }
-
- /* rx handle */
- hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
-
- /* enc mode */
- switch (aes_gcm->key_len) {
- case 128:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
- break;
- case 256:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
- break;
- }
-
- /* flags */
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
- MLX5_FPGA_IPSEC_SA_SPI_EN |
- MLX5_FPGA_IPSEC_SA_IP_ESP;
-
- if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
- else
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
-}
-
-static void
-mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
-
- /* IPs */
- memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
- memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));
-
- /* SPI */
- hw_sa->ipsec_sa_v1.spi = spi;
-
- /* flags */
- if (is_ipv6)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
-}
-
-static bool is_full_mask(const void *p, size_t len)
-{
- WARN_ON(len % 4);
-
- return !memchr_inv(p, 0xff, len);
-}
-
-static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
- const u32 *match_c,
- const u32 *match_v)
-{
- const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- misc_parameters);
- const void *headers_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param,
- match_v,
- outer_headers);
-
- if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
- const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4);
- const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-
- if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)) ||
- !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)))
- return false;
- } else {
- const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv6_layout.ipv6);
- const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
-
- if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)) ||
- !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)))
- return false;
- }
-
- if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
- outer_esp_spi),
- MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v)
-{
- u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
- bool ipv6_flow;
-
- ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
-
- if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
- mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
- mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
- mlx5_fs_is_vxlan_flow(match_c) ||
- !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
- ipv6_flow))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
- mlx5_fs_is_outer_ipsec_flow(match_c))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
- ipv6_flow)
- return false;
-
- if (!validate_fpga_full_mask(dev, match_c, match_v))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_context *flow_context)
-{
- const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
- bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
- int ret;
-
- ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
- match_v);
- if (!ret)
- return ret;
-
- if (is_dmac || is_smac ||
- (match_criteria_enable &
- ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
- (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
- return false;
-
- return true;
-}
-
-static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle)
-{
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(accel_xfrm, typeof(*fpga_xfrm),
- accel_xfrm);
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode, err;
- void *context;
-
- /* alloc SA */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- /* build candidate SA */
- mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
- saddr, daddr, spi, is_ipv6,
- &sa_ctx->hw_sa);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
-		/* all rules must use the same IPs and SPI */
- if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
- sizeof(sa_ctx->hw_sa))) {
- context = ERR_PTR(-EINVAL);
- goto exists;
- }
-
- ++fpga_xfrm->num_rules;
- context = fpga_xfrm->sa_ctx;
- goto exists;
- }
-
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
- err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
- if (err < 0) {
- context = ERR_PTR(err);
- goto exists;
- }
-
- sa_ctx->sa_handle = err;
- if (sa_handle)
- *sa_handle = sa_ctx->sa_handle;
- }
-	/* This fpga_xfrm is not bound yet; try to add it to the hash */
- mutex_lock(&fipsec->sa_hash_lock);
-
- err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa);
- if (err) {
-		/* Can't bind a different accel_xfrm to an already existing sa_ctx,
-		 * because multiple keymats for the same IPs and SPI
-		 * are not supported
- */
- context = ERR_PTR(-EEXIST);
- goto unlock_hash;
- }
-
-	/* Bind accel_xfrm to sa_ctx */
- opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- context = ERR_PTR(err);
- goto delete_hash;
- }
-
- mutex_unlock(&fipsec->sa_hash_lock);
-
- ++fpga_xfrm->num_rules;
- fpga_xfrm->sa_ctx = sa_ctx;
- sa_ctx->fpga_xfrm = fpga_xfrm;
-
- mutex_unlock(&fpga_xfrm->lock);
-
- return sa_ctx;
-
-delete_hash:
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
-unlock_hash:
- mutex_unlock(&fipsec->sa_hash_lock);
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-exists:
- mutex_unlock(&fpga_xfrm->lock);
- kfree(sa_ctx);
- return context;
-}
-
-static void *
-mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- bool is_egress)
-{
- struct mlx5_accel_esp_xfrm *accel_xfrm;
- __be32 saddr[4], daddr[4], spi;
- struct mlx5_flow_group *fg;
- bool is_ipv6 = false;
-
- fs_get_obj(fg, fte->node.parent);
- /* validate */
- if (is_egress &&
- !mlx5_is_fpga_egress_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val,
- &fte->action,
- &fte->flow_context))
- return ERR_PTR(-EINVAL);
- else if (!mlx5_is_fpga_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val))
- return ERR_PTR(-EINVAL);
-
- /* get xfrm context */
- accel_xfrm =
- (struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
-
- /* IPs */
- if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
- fte->val)) {
- memcpy(&saddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
- sizeof(saddr[3]));
- memcpy(&daddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- sizeof(daddr[3]));
- } else {
- memcpy(saddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(saddr));
- memcpy(daddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(daddr));
- is_ipv6 = true;
- }
-
- /* SPI */
- spi = MLX5_GET_BE(typeof(spi),
- fte_match_param, fte->val,
- misc_parameters.outer_esp_spi);
-
- /* create */
- return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
- saddr, daddr,
- spi, is_ipv6, NULL);
-}
-
-static void
-mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
-{
- struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
- int err;
-
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- WARN_ON(err);
- return;
- }
-
- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
- MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-
- mutex_lock(&fipsec->sa_hash_lock);
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
- mutex_unlock(&fipsec->sa_hash_lock);
-}
-
-static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
-
- mutex_lock(&fpga_xfrm->lock);
- if (!--fpga_xfrm->num_rules) {
- mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
- kfree(fpga_xfrm->sa_ctx);
- fpga_xfrm->sa_ctx = NULL;
- }
- mutex_unlock(&fpga_xfrm->lock);
-}
-
-static inline struct mlx5_fpga_ipsec_rule *
-_rule_search(struct rb_root *root, struct fs_fte *fte)
-{
- struct rb_node *node = root->rb_node;
-
- while (node) {
- struct mlx5_fpga_ipsec_rule *rule =
- container_of(node, struct mlx5_fpga_ipsec_rule,
- node);
-
- if (rule->fte < fte)
- node = node->rb_left;
- else if (rule->fte > fte)
- node = node->rb_right;
- else
- return rule;
- }
- return NULL;
-}
-
-static struct mlx5_fpga_ipsec_rule *
-rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
-{
- struct mlx5_fpga_ipsec_rule *rule;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rule = _rule_search(&ipsec_dev->rules_rb, fte);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return rule;
-}
-
-static inline int _rule_insert(struct rb_root *root,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_node **new = &root->rb_node, *parent = NULL;
-
- /* Figure out where to put new node */
- while (*new) {
- struct mlx5_fpga_ipsec_rule *this =
- container_of(*new, struct mlx5_fpga_ipsec_rule,
- node);
-
- parent = *new;
- if (rule->fte < this->fte)
- new = &((*new)->rb_left);
- else if (rule->fte > this->fte)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
-
- /* Add new node and rebalance tree. */
- rb_link_node(&rule->node, parent, new);
- rb_insert_color(&rule->node, root);
-
- return 0;
-}
-
-static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- int ret;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- ret = _rule_insert(&ipsec_dev->rules_rb, rule);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return ret;
-}
-
-static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_root *root = &ipsec_dev->rules_rb;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rb_erase(&rule->node, root);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-}
-
-static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- _rule_delete(ipsec_dev, rule);
- kfree(rule);
-}
-
-struct mailbox_mod {
- uintptr_t saved_esp_id;
- u32 saved_action;
- u32 saved_outer_esp_spi_value;
-};
-
-static void restore_spec_mailbox(struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
- mbox_mod->saved_outer_esp_spi_value);
- fte->action.action |= mbox_mod->saved_action;
- fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
-}
-
-static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- mbox_mod->saved_esp_id = fte->action.esp_id;
- mbox_mod->saved_action = fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- mbox_mod->saved_outer_esp_spi_value =
- MLX5_GET(fte_match_set_misc, misc_params_v,
- outer_esp_spi);
-
- fte->action.esp_id = 0;
- fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- if (!MLX5_CAP_FLOWTABLE(mdev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
-}
-
-static enum fs_flow_table_type egress_to_fs_ft(bool egress)
-{
- return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
-}
-
-static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg,
- bool is_egress)
-{
- int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft, u32 *in,
- struct mlx5_flow_group *fg) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
- char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria.misc_parameters);
- struct mlx5_core_dev *dev = ns->dev;
- u32 saved_outer_esp_spi_mask;
- u8 match_criteria_enable;
- int ret;
-
- if (MLX5_CAP_FLOWTABLE(dev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- return create_flow_group(ns, ft, in, fg);
-
- match_criteria_enable =
- MLX5_GET(create_flow_group_in, in, match_criteria_enable);
- saved_outer_esp_spi_mask =
- MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
- if (!match_criteria_enable || !saved_outer_esp_spi_mask)
- return create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
-
- if (!(*misc_params_c) &&
- !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
- MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
-
- ret = create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
- MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*create_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return create_fte(ns, ft, fg, fte);
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
- return -ENOMEM;
-
- rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
- if (IS_ERR(rule->ctx)) {
- int err = PTR_ERR(rule->ctx);
-
- kfree(rule);
- return err;
- }
-
- rule->fte = fte;
- WARN_ON(rule_insert(fipsec, rule));
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = create_fte(ns, ft, fg, fte);
- restore_spec_mailbox(fte, &mbox_mod);
- if (ret) {
- _rule_delete(fipsec, rule);
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- kfree(rule);
- }
-
- return ret;
-}
-
-static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*update_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
- struct mlx5_core_dev *dev = ns->dev;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return update_fte(ns, ft, fg, modify_mask, fte);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = update_fte(ns, ft, fg, modify_mask, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return delete_fte(ns, ft, fte);
-
- rule = rule_search(fipsec, fte);
- if (!rule)
- return -ENOENT;
-
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- rule_delete(fipsec, rule);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = delete_fte(ns, ft, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
-}
-
-static struct mlx5_flow_cmds fpga_ipsec_ingress;
-static struct mlx5_flow_cmds fpga_ipsec_egress;
-
-const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- switch (type) {
- case FS_FT_NIC_RX:
- return &fpga_ipsec_ingress;
- case FS_FT_NIC_TX:
- return &fpga_ipsec_egress;
- default:
- WARN_ON(true);
- return NULL;
- }
-}
-
-static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn *conn;
- int err;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return 0;
-
- fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
- if (!fdev->ipsec)
- return -ENOMEM;
-
- fdev->ipsec->fdev = fdev;
-
- err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
- fdev->ipsec->caps);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
- err);
- goto error;
- }
-
- INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
- spin_lock_init(&fdev->ipsec->pending_cmds_lock);
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_ipsec_recv;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
- err);
- goto error;
- }
- fdev->ipsec->conn = conn;
-
- err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
- if (err)
- goto err_destroy_conn;
- mutex_init(&fdev->ipsec->sa_hash_lock);
-
- fdev->ipsec->rules_rb = RB_ROOT;
- mutex_init(&fdev->ipsec->rules_rb_lock);
-
- err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
- err);
- goto err_destroy_hash;
- }
-
- ida_init(&fdev->ipsec->halloc);
-
- return 0;
-
-err_destroy_hash:
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
-err_destroy_conn:
- mlx5_fpga_sbu_conn_destroy(conn);
-
-error:
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
- return err;
-}
-
-static void destroy_rules_rb(struct rb_root *root)
-{
- struct mlx5_fpga_ipsec_rule *r, *tmp;
-
- rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
- rb_erase(&r->node, root);
- mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
- kfree(r);
- }
-}
-
-static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return;
-
- ida_destroy(&fdev->ipsec->halloc);
- destroy_rules_rb(&fdev->ipsec->rules_rb);
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
- mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
-}
-
-void mlx5_fpga_ipsec_build_fs_cmds(void)
-{
- /* ingress */
- fpga_ipsec_ingress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
- fpga_ipsec_ingress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
- fpga_ipsec_ingress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
- fpga_ipsec_ingress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_ingress;
- fpga_ipsec_ingress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
- fpga_ipsec_ingress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_ingress;
- fpga_ipsec_ingress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_ingress;
- fpga_ipsec_ingress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_ingress;
- fpga_ipsec_ingress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
-
- /* egress */
- fpga_ipsec_egress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
- fpga_ipsec_egress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
- fpga_ipsec_egress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
- fpga_ipsec_egress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_egress;
- fpga_ipsec_egress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
- fpga_ipsec_egress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_egress;
- fpga_ipsec_egress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_egress;
- fpga_ipsec_egress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_egress;
- fpga_ipsec_egress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
-}
-
-static int
-mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->tfc_pad) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.icv_len != 128) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
- v2_command))) {
-		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered when v2 command is not supported\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-
- if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
- mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
- mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
- if (!fpga_xfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&fpga_xfrm->lock);
- memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
- sizeof(fpga_xfrm->accel_xfrm.attrs));
-
- return &fpga_xfrm->accel_xfrm;
-}
-
-static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(xfrm, struct mlx5_fpga_esp_xfrm,
- accel_xfrm);
- /* assuming no sa_ctx are connected to this xfrm_ctx */
- kfree(fpga_xfrm);
-}
-
-static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- struct mlx5_core_dev *mdev = xfrm->mdev;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
- struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
-		mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
- return -EOPNOTSUPP;
- }
-
- if (is_v2_sadb_supported(fipsec)) {
- mlx5_core_warn(mdev, "Modify esp is not supported\n");
- return -EOPNOTSUPP;
- }
-
- fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (!fpga_xfrm->sa_ctx)
-		/* Unbound xfrm, change only sw attrs */
- goto change_sw_xfrm_attrs;
-
- /* copy original hw sa */
- memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
- mutex_lock(&fipsec->sa_hash_lock);
- /* remove original hw sa from hash */
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa));
- /* update hw_sa with new xfrm attrs*/
- mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
- &fpga_xfrm->sa_ctx->hw_sa);
- /* try to insert new hw_sa to hash */
- err = rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa);
- if (err)
- goto rollback_sa;
-
- /* modify device with new hw_sa */
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
- fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err)
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
-rollback_sa:
- if (err) {
- /* return original hw_sa to hash */
- memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
- sizeof(org_hw_sa));
- WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
- }
- mutex_unlock(&fipsec->sa_hash_lock);
-
-change_sw_xfrm_attrs:
- if (!err)
- memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
- mutex_unlock(&fpga_xfrm->lock);
- return err;
-}
-
-static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
- .device_caps = mlx5_fpga_ipsec_device_caps,
- .counters_count = mlx5_fpga_ipsec_counters_count,
- .counters_read = mlx5_fpga_ipsec_counters_read,
- .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
- .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
- .init = mlx5_fpga_ipsec_init,
- .cleanup = mlx5_fpga_ipsec_cleanup,
- .esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
- .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
- .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
-};
-
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return NULL;
-
- return &fpga_ipsec_ops;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
deleted file mode 100644
index 8931b5584477..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_IPSEC_H__
-#define __MLX5_FPGA_IPSEC_H__
-
-#include "accel/ipsec.h"
-#include "fs_cmd.h"
-
-#ifdef CONFIG_MLX5_FPGA_IPSEC
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
-const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
-void mlx5_fpga_ipsec_build_fs_cmds(void);
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
-#else
-static inline
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{ return NULL; }
-static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- return mlx5_fs_cmd_get_default(type);
-}
-
-static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
-static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif /* CONFIG_MLX5_FPGA_IPSEC */
-#endif /* __MLX5_FPGA_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
deleted file mode 100644
index 29b7339ebfa3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ /dev/null
@@ -1,622 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-#include "fpga/tls.h"
-#include "fpga/cmd.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-#include "accel/tls.h"
-
-struct mlx5_fpga_tls_command_context;
-
-typedef void (*mlx5_fpga_tls_command_complete)
- (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *ctx,
- struct mlx5_fpga_dma_buf *resp);
-
-struct mlx5_fpga_tls_command_context {
- struct list_head list;
- /* There is no guarantee on the order between the TX completion
- * and the command response.
- * The TX completion is going to touch cmd->buf even in
- * the case of successful transmission.
- * So instead of requiring separate allocations for cmd
- * and cmd->buf we've decided to use a reference counter
- */
- refcount_t ref;
- struct mlx5_fpga_dma_buf buf;
- mlx5_fpga_tls_command_complete complete;
-};
-
-static void
-mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
-{
- if (refcount_dec_and_test(&ctx->ref))
- kfree(ctx);
-}
-
-static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_fpga_conn *conn = fdev->tls->conn;
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- ctx = list_first_entry(&tls->pending_cmds,
- struct mlx5_fpga_tls_command_context, list);
- list_del(&ctx->list);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
- ctx->complete(conn, fdev, ctx, resp);
-}
-
-static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_tls_command_context *ctx =
- container_of(buf, struct mlx5_fpga_tls_command_context, buf);
-
- mlx5_fpga_tls_put_command_ctx(ctx);
-
- if (unlikely(status))
- mlx5_fpga_tls_cmd_complete(fdev, NULL);
-}
-
-static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- mlx5_fpga_tls_command_complete complete)
-{
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
- int ret;
-
- refcount_set(&cmd->ref, 2);
- cmd->complete = complete;
- cmd->buf.complete = mlx5_fpga_cmd_send_complete;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
- * to make sure commands are inserted to the tls->pending_cmds list
- * and the command QP in the same order.
- */
- ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
- if (likely(!ret))
- list_add_tail(&cmd->list, &tls->pending_cmds);
- else
- complete(tls->conn, fdev, cmd, NULL);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
-}
-
-/* Start of context identifiers range (inclusive) */
-#define SWID_START 0
-/* End of context identifiers range (exclusive) */
-#define SWID_END BIT(24)
-
-static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
- void *ptr)
-{
- unsigned long flags;
- int ret;
-
- /* TLS metadata format is 1 byte for syndrome followed
- * by 3 bytes of swid (software ID)
- * swid must not exceed 3 bytes.
- * See tls_rxtx.c:insert_pet() for details
- */
- BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(idr_spinlock, flags);
- ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irqrestore(idr_spinlock, flags);
- idr_preload_end();
-
- return ret;
-}
-
-static void *mlx5_fpga_tls_release_swid(struct idr *idr,
- spinlock_t *idr_spinlock, u32 swid)
-{
- unsigned long flags;
- void *ptr;
-
- spin_lock_irqsave(idr_spinlock, flags);
- ptr = idr_remove(idr, swid);
- spin_unlock_irqrestore(idr_spinlock, flags);
- return ptr;
-}
-
-static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf, u8 status)
-{
- kfree(buf);
-}
-
-static void
-mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- if (resp) {
- u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
-
- if (syndrome)
- mlx5_fpga_err(fdev,
- "Teardown stream failed with syndrome = %d",
- syndrome);
- }
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
-{
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
- MLX5_BYTE_OFF(tls_flow, ipv6));
-
- MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
- MLX5_SET(tls_cmd, cmd, direction_sx,
- MLX5_GET(tls_flow, flow, direction_sx));
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- struct mlx5_fpga_dma_buf *buf;
- int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
- void *flow;
- void *cmd;
- int ret;
-
- buf = kzalloc(size, GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
-
- cmd = (buf + 1);
-
- rcu_read_lock();
- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- if (unlikely(!flow)) {
- rcu_read_unlock();
- WARN_ONCE(1, "Received NULL pointer for handle\n");
- kfree(buf);
- return -EINVAL;
- }
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- rcu_read_unlock();
-
- MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
- MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
- MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- buf->complete = mlx_tls_kfree_complete;
-
- ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
- if (ret < 0)
- kfree(buf);
-
- return ret;
-}
-
-static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
- void *flow, u32 swid, gfp_t flags)
-{
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_dma_buf *buf;
- void *cmd;
-
- ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
- if (!ctx)
- return;
-
- buf = &ctx->buf;
- cmd = (ctx + 1);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
- MLX5_SET(tls_cmd, cmd, swid, swid);
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- kfree(flow);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
- mlx5_fpga_tls_teardown_completion);
-}
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- void *flow;
-
- if (direction_sx)
- flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock,
- swid);
- else
- flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock,
- swid);
-
- if (!flow) {
- mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
- swid);
- return;
- }
-
- synchronize_rcu(); /* before kfree(flow) */
- mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
-}
-
-enum mlx5_fpga_setup_stream_status {
- MLX5_FPGA_CMD_PENDING,
- MLX5_FPGA_CMD_SEND_FAILED,
- MLX5_FPGA_CMD_RESPONSE_RECEIVED,
- MLX5_FPGA_CMD_ABANDONED,
-};
-
-struct mlx5_setup_stream_context {
- struct mlx5_fpga_tls_command_context cmd;
- atomic_t status;
- u32 syndrome;
- struct completion comp;
-};
-
-static void
-mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_setup_stream_context *ctx =
- container_of(cmd, struct mlx5_setup_stream_context, cmd);
- int status = MLX5_FPGA_CMD_SEND_FAILED;
- void *tls_cmd = ctx + 1;
-
-	/* If we failed to send the command, resp == NULL */
- if (resp) {
- ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
- status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
- }
-
- status = atomic_xchg_release(&ctx->status, status);
- if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
- complete(&ctx->comp);
- return;
- }
-
- mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
- ctx->syndrome);
-
- if (!ctx->syndrome) {
- /* The process was killed while waiting for the context to be
- * added, and the add completed successfully.
-		 * We need to destroy the HW context, and we cannot reuse
- * the command context because we might not have received
- * the tx completion yet.
- */
- mlx5_fpga_tls_del_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC,
- MLX5_GET(tls_cmd, tls_cmd,
- direction_sx));
- }
-
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
- struct mlx5_setup_stream_context *ctx)
-{
- struct mlx5_fpga_dma_buf *buf;
- void *cmd = ctx + 1;
- int status, ret = 0;
-
- buf = &ctx->cmd.buf;
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);
-
- init_completion(&ctx->comp);
- atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
- ctx->syndrome = -1;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
- mlx5_fpga_tls_setup_completion);
- wait_for_completion_killable(&ctx->comp);
-
- status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
- if (unlikely(status == MLX5_FPGA_CMD_PENDING))
- /* ctx is going to be released in mlx5_fpga_tls_setup_completion */
- return -EINTR;
-
- if (unlikely(ctx->syndrome))
- ret = -ENOMEM;
-
- mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
- return ret;
-}
-
-static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
- struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;
-
- mlx5_fpga_tls_cmd_complete(fdev, buf);
-}
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
- return false;
-
- return true;
-}
-
-static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
- u32 *p_caps)
-{
- int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
- u32 caps = 0;
- void *buf;
-
- buf = kzalloc(cap_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
- if (err)
- goto out;
-
- if (MLX5_GET(tls_extended_cap, buf, tx))
- caps |= MLX5_ACCEL_TLS_TX;
- if (MLX5_GET(tls_extended_cap, buf, rx))
- caps |= MLX5_ACCEL_TLS_RX;
- if (MLX5_GET(tls_extended_cap, buf, tls_v12))
- caps |= MLX5_ACCEL_TLS_V12;
- if (MLX5_GET(tls_extended_cap, buf, tls_v13))
- caps |= MLX5_ACCEL_TLS_V13;
- if (MLX5_GET(tls_extended_cap, buf, lro))
- caps |= MLX5_ACCEL_TLS_LRO;
- if (MLX5_GET(tls_extended_cap, buf, ipv6))
- caps |= MLX5_ACCEL_TLS_IPV6;
-
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
- caps |= MLX5_ACCEL_TLS_AES_GCM128;
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
- caps |= MLX5_ACCEL_TLS_AES_GCM256;
-
- *p_caps = caps;
- err = 0;
-out:
- kfree(buf);
- return err;
-}
-
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_conn *conn;
- struct mlx5_fpga_tls *tls;
- int err = 0;
-
- if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
- if (err)
- goto error;
-
- if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
- err = -ENOTSUPP;
- goto error;
- }
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
- err);
- goto error;
- }
-
- tls->conn = conn;
- spin_lock_init(&tls->pending_cmds_lock);
- INIT_LIST_HEAD(&tls->pending_cmds);
-
- idr_init(&tls->tx_idr);
- idr_init(&tls->rx_idr);
- spin_lock_init(&tls->tx_idr_spinlock);
- spin_lock_init(&tls->rx_idr_spinlock);
- fdev->tls = tls;
- return 0;
-
-error:
- kfree(tls);
- return err;
-}
-
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->tls)
- return;
-
- mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
- kfree(fdev->tls);
- fdev->tls = NULL;
-}
-
-static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
- struct tls_crypto_info *info,
- __be64 *rcd_sn)
-{
- struct tls12_crypto_info_aes_gcm_128 *crypto_info =
- (struct tls12_crypto_info_aes_gcm_128 *)info;
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
- TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
- crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- /* in AES-GCM 128 we need to write the key twice */
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
- TLS_CIPHER_AES_GCM_128_KEY_SIZE,
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
-}
-
-static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
- struct tls_crypto_info *crypto_info)
-{
- __be64 rcd_sn;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
- return -EINVAL;
- mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 swid, u32 tcp_sn)
-{
- u32 caps = mlx5_fpga_tls_device_caps(mdev);
- struct mlx5_setup_stream_context *ctx;
- int ret = -ENOMEM;
- size_t cmd_size;
- void *cmd;
-
- cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
- ctx = kzalloc(cmd_size, GFP_KERNEL);
- if (!ctx)
- goto out;
-
- cmd = ctx + 1;
- ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
- if (ret)
- goto free_ctx;
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
-
- MLX5_SET(tls_cmd, cmd, swid, swid);
- MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);
-
- return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
-
-free_ctx:
- kfree(ctx);
-out:
- return ret;
-}
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- int ret = -ENOMEM;
- u32 swid;
-
- if (direction_sx)
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, flow);
- else
- ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, flow);
-
- if (ret < 0)
- return ret;
-
- swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
-
- ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
- if (ret && ret != -EINTR)
- goto free_swid;
-
- *p_swid = swid;
- return 0;
-free_swid:
- if (direction_sx)
- mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, swid);
- else
- mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, swid);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
deleted file mode 100644
index 5714cf391d1b..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_TLS_H__
-#define __MLX5_FPGA_TLS_H__
-
-#include <linux/mlx5/driver.h>
-
-#include <net/tls.h>
-#include "fpga/core.h"
-
-struct mlx5_fpga_tls {
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps;
- struct mlx5_fpga_conn *conn;
-
- struct idr tx_idr;
- struct idr rx_idr;
- spinlock_t tx_idr_spinlock; /* protects the IDR */
- spinlock_t rx_idr_spinlock; /* protects the IDR */
-};
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx);
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
-
-static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mdev->fpga->tls->caps;
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-
-#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a0ac17c3f12f..2ccf7bef9b05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -455,7 +455,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
continue;
if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
@@ -571,18 +572,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
- unsigned int id, type = dst->dest_attr.type;
+ enum mlx5_flow_destination_type type = dst->dest_attr.type;
+ enum mlx5_ifc_flow_destination_type ifc_type;
+ unsigned int id;
if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ continue;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
id = dst->dest_attr.ft_num;
- type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
@@ -596,8 +602,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
/* destination_id is reserved */
id = 0;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
break;
}
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
@@ -612,13 +620,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
id = dst->dest_attr.sampler_id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
break;
default:
id = dst->dest_attr.tir_num;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
}
MLX5_SET(dest_format_struct, in_dests, destination_type,
- type);
+ ifc_type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += dst_cnt_size;
list_size++;
@@ -878,9 +888,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
-#ifdef CONFIG_MLX5_IPSEC
case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
-#endif
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 816d991f7621..fb8175672478 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,8 +40,6 @@
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -188,24 +186,18 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
-#ifdef CONFIG_MLX5_IPSEC
.ar_size = 2,
-#else
- .ar_size = 1,
-#endif
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
-#ifdef CONFIG_MLX5_IPSEC
ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
KERNEL_TX_IPSEC_NUM_LEVELS))),
-#endif
}
};
@@ -432,6 +424,16 @@ static bool is_fwd_next_action(u32 action)
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}
+static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
+{
+ return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
+ type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
+ type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
+ type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
+ type == MLX5_FLOW_DESTINATION_TYPE_TIR;
+}
+
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
int i;
@@ -558,8 +560,8 @@ static void del_sw_hw_rule(struct fs_node *node)
mutex_unlock(&rule->dest_attr.ft->lock);
}
- if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
- --fte->dests_size) {
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
+ --fte->dests_size;
fte->modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
@@ -567,17 +569,23 @@ static void del_sw_hw_rule(struct fs_node *node)
goto out;
}
- if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
- --fte->dests_size) {
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+ --fte->dests_size;
fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
goto out;
}
- if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
- --fte->dests_size) {
+ if (is_fwd_dest_type(rule->dest_attr.type)) {
+ --fte->dests_size;
+ --fte->fwd_dests;
+
+ if (!fte->fwd_dests)
+ fte->action.action &=
+ ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte->modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ goto out;
}
out:
kfree(rule);
@@ -597,6 +605,7 @@ static void del_hw_fte(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_fte(fte);
+ WARN_ON(fte->dests_size);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
if (node->active) {
@@ -1296,6 +1305,8 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
rule->node.type = FS_TYPE_FLOW_DEST;
if (dest)
memcpy(&rule->dest_attr, dest, sizeof(*dest));
+ else
+ rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
return rule;
}
@@ -1372,6 +1383,9 @@ create_flow_handle(struct fs_fte *fte,
if (dest) {
fte->dests_size++;
+ if (is_fwd_dest_type(dest[i].type))
+ fte->fwd_dests++;
+
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
*modify_mask |= type ? count : dst;
@@ -2071,16 +2085,16 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->dests_size) {
- if (fte->modify_mask)
- modify_fte(fte);
- up_write_ref_node(&fte->node, false);
- } else if (list_empty(&fte->node.children)) {
- del_hw_fte(&fte->node);
+ if (list_empty(&fte->node.children)) {
+ fte->node.del_hw_func(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
+ } else if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
} else {
up_write_ref_node(&fte->node, false);
}
@@ -2519,10 +2533,6 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
- (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
- cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
-
/* Create the root namespace */
root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
@@ -3172,8 +3182,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c488a7c5b07e..67cad7a6d836 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -226,6 +226,7 @@ struct fs_fte {
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
+ u32 fwd_dests;
u32 index;
struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 614687e0e3d9..cfb8bedba512 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,7 +35,6 @@
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"
-#include "accel/tls.h"
enum {
MCQS_IDENTIFIER_BOOT_IMG = 0x1,
@@ -249,7 +248,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
+ if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2589e39eb9c7..35e48ef04845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,9 +62,7 @@
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ipsec.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
@@ -179,30 +177,29 @@ static struct mlx5_profile profile[] = {
},
};
-static int fw_initializing(struct mlx5_core_dev *dev)
-{
- return ioread32be(&dev->iseg->initializing) >> 31;
-}
-
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
u32 warn_time_mili)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+ u32 fw_initializing;
int err = 0;
- while (fw_initializing(dev)) {
+ do {
+ fw_initializing = ioread32be(&dev->iseg->initializing);
+ if (!(fw_initializing >> 31))
+ break;
if (time_after(jiffies, end)) {
err = -EBUSY;
break;
}
if (warn_time_mili && time_after(jiffies, warn)) {
- mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
- jiffies_to_msecs(end - warn) / 1000);
+ mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
+ jiffies_to_msecs(end - warn) / 1000, fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
- }
+ } while (true);
return err;
}
@@ -1183,14 +1180,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
- mlx5_accel_ipsec_init(dev);
-
- err = mlx5_accel_tls_init(dev);
- if (err) {
- mlx5_core_err(dev, "TLS device start failed %d\n", err);
- goto err_tls_start;
- }
-
err = mlx5_init_fs(dev);
if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n");
@@ -1238,9 +1227,6 @@ err_vhca:
err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
- mlx5_accel_tls_cleanup(dev);
-err_tls_start:
- mlx5_accel_ipsec_cleanup(dev);
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
@@ -1266,8 +1252,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
- mlx5_accel_ipsec_cleanup(dev);
- mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
@@ -1947,7 +1931,6 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
- mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 4dd619d238cc..223c8741b7ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -311,7 +311,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format_struct, in_dests, destination_type,
- MLX5_FLOW_DESTINATION_TYPE_VPORT);
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
@@ -604,7 +604,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
for (i = 0; i < fte->dests_size; i++) {
- if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+ fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
continue;
if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
@@ -719,18 +720,24 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
int list_size = 0;
for (i = 0; i < fte->dests_size; i++) {
- unsigned int id, type = fte->dest_arr[i].type;
+ enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
+ enum mlx5_ifc_flow_destination_type ifc_type;
+ unsigned int id;
if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ continue;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
id = fte->dest_arr[i].ft_num;
- type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = fte->dest_arr[i].ft_id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
@@ -740,8 +747,10 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
destination_eswitch_owner_vhca_id_valid,
!!(fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
} else {
id = 0;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid, 1);
}
@@ -761,13 +770,15 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
id = fte->dest_arr[i].sampler_id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
break;
default:
id = fte->dest_arr[i].tir_num;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
}
MLX5_SET(dest_format_struct, in_dests, destination_type,
- type);
+ ifc_type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += dst_cnt_size;
list_size++;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 196adeb33495..1a465fd5d8b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
- core_acl_flex_actions.o core_env.o
+ core_acl_flex_actions.o core_env.o \
+ core_linecards.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b13e0f8d232a..fc52832241b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -48,6 +48,7 @@ struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
u16 local_port;
+ struct mlxsw_linecard *linecard;
};
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
@@ -82,6 +83,7 @@ struct mlxsw_core {
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
struct mlxsw_thermal *thermal;
+ struct mlxsw_linecards *linecards;
struct mlxsw_core_port *ports;
unsigned int max_ports;
atomic_t active_ports_count;
@@ -94,6 +96,17 @@ struct mlxsw_core {
/* driver_priv has to be always the last item */
};
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->linecards;
+}
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ mlxsw_core->linecards = linecards;
+}
+
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
static u64 mlxsw_ports_occ_get(void *priv)
@@ -2145,6 +2158,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_fw_rev_validate;
+ err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_linecards_init;
+
err = mlxsw_core_health_init(mlxsw_core);
if (err)
goto err_health_init;
@@ -2158,7 +2175,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
+ err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
if (err)
goto err_env_init;
@@ -2183,6 +2200,8 @@ err_thermal_init:
err_hwmon_init:
mlxsw_core_health_fini(mlxsw_core);
err_health_init:
+ mlxsw_linecards_fini(mlxsw_core);
+err_linecards_init:
err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
@@ -2255,6 +2274,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
mlxsw_core_health_fini(mlxsw_core);
+ mlxsw_linecards_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
mlxsw_emad_fini(mlxsw_core);
@@ -2956,7 +2976,7 @@ EXPORT_SYMBOL(mlxsw_core_res_get);
static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -2979,6 +2999,15 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
attrs.switch_id.id_len = switch_id_len;
mlxsw_core_port->local_port = local_port;
devlink_port_attrs_set(devlink_port, &attrs);
+ if (slot_index) {
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(mlxsw_core->linecards,
+ slot_index);
+ mlxsw_core_port->linecard = linecard;
+ devlink_port_linecard_set(devlink_port,
+ linecard->devlink_linecard);
+ }
err = devl_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -2996,7 +3025,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
}
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -3005,7 +3034,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
int err;
err = __mlxsw_core_port_init(mlxsw_core, local_port,
- DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes,
switch_id, switch_id_len);
@@ -3036,7 +3065,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
DEVLINK_PORT_FLAVOUR_CPU,
- 0, false, 0, false, 0,
+ 0, 0, false, 0, false, 0,
switch_id, switch_id_len);
if (err)
return err;
@@ -3112,6 +3141,16 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+
+ return mlxsw_core_port->linecard;
+}
+
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
{
const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
@@ -3124,6 +3163,15 @@ bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
}
EXPORT_SYMBOL(mlxsw_core_port_is_xm);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
+ return;
+ mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
+}
+
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->env;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 16ee5e90973d..d008282d7f2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -35,6 +35,11 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core);
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecard);
+
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev);
@@ -231,7 +236,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split, u32 split_port_subnumber,
+ u8 slot_index, u32 port_number, bool split,
+ u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
unsigned char switch_id_len);
@@ -252,7 +258,14 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port);
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -326,6 +339,10 @@ struct mlxsw_driver {
unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack);
+ void (*ports_remove_selected)(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -543,4 +560,65 @@ static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
return (struct mlxsw_skb_cb *) skb->cb;
}
+struct mlxsw_linecards;
+
+enum mlxsw_linecard_status_event_type {
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
+};
+
+struct mlxsw_linecard {
+ u8 slot_index;
+ struct mlxsw_linecards *linecards;
+ struct devlink_linecard *devlink_linecard;
+ struct mutex lock; /* Locks accesses to the linecard structure */
+ char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN];
+ char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */
+ enum mlxsw_linecard_status_event_type status_event_type_to;
+ struct delayed_work status_event_to_dw;
+ u8 provisioned:1,
+ ready:1,
+ active:1;
+ u16 hw_revision;
+ u16 ini_version;
+ struct list_head device_list;
+};
+
+struct mlxsw_linecard_types_info;
+
+struct mlxsw_linecards {
+ struct mlxsw_core *mlxsw_core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 count;
+ struct mlxsw_linecard_types_info *types_info;
+ struct list_head event_ops_list;
+ struct mutex event_ops_list_lock; /* Locks accesses to event ops list */
+ struct mlxsw_linecard linecards[];
+};
+
+static inline struct mlxsw_linecard *
+mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
+{
+ return &linecards->linecards[slot_index - 1];
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info);
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
+
+typedef void mlxsw_linecards_event_op_t(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, void *priv);
+
+struct mlxsw_linecards_event_ops {
+ mlxsw_linecards_event_op_t *got_active;
+ mlxsw_linecards_event_op_t *got_inactive;
+};
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 29a74b8bd5b5..34bec9cd572c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -21,19 +21,60 @@ struct mlxsw_env_module_info {
enum mlxsw_reg_pmtm_module_type type;
};
-struct mlxsw_env {
- struct mlxsw_core *core;
+struct mlxsw_env_line_card {
u8 module_count;
- struct mutex module_info_lock; /* Protects 'module_info'. */
+ bool active;
struct mlxsw_env_module_info module_info[];
};
-static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+struct mlxsw_env {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 max_module_count; /* Maximum number of modules per-slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mutex line_cards_lock; /* Protects line cards. */
+ struct mlxsw_env_line_card *line_cards[];
+};
+
+static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ return mlxsw_env->line_cards[slot_index]->active;
+}
+
+static bool mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ bool active;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ active = __mlxsw_env_linecard_is_active(mlxsw_env, slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return active;
+}
+
+static struct
+mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ return &mlxsw_env->line_cards[slot_index]->module_info[module];
+}
+
+static int __mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ struct mlxsw_env_module_info *module_info;
int err;
- switch (mlxsw_env->module_info[module].type) {
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ module_info = mlxsw_env_module_info_get(core, slot_index, module);
+ switch (module_info->type) {
case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR:
err = -EINVAL;
break;
@@ -44,32 +85,34 @@ static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
return err;
}
-static int mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+static int mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
int err;
- mutex_lock(&mlxsw_env->module_info_lock);
- err = __mlxsw_env_validate_module_type(core, module);
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = __mlxsw_env_validate_module_type(core, slot_index, module);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
static int
-mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
- bool *cmis)
+mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id,
+ bool *qsfp, bool *cmis)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 ident;
int err;
- err = mlxsw_env_validate_module_type(core, id);
+ err = mlxsw_env_validate_module_type(core, slot_index, id);
if (err)
return err;
- mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0,
+ MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -99,8 +142,8 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
}
static int
-mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
- u16 offset, u16 size, void *data,
+mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, u16 offset, u16 size, void *data,
bool qsfp, unsigned int *p_read_size)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
@@ -145,7 +188,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
}
}
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size,
+ i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -162,8 +206,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
return 0;
}
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp)
+int
+mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index,
+ int module, int off, int *temp)
{
unsigned int module_temp, module_crit, module_emerg;
union {
@@ -177,8 +222,9 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int page;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
@@ -207,7 +253,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
*/
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(core, slot_index, module, &qsfp,
+ &cmis);
if (err)
return err;
@@ -219,12 +266,12 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
else
page = MLXSW_REG_MCIA_TH_PAGE_NUM;
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page,
MLXSW_REG_MCIA_TH_PAGE_OFF + off,
MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
} else {
- mlxsw_reg_mcia_pack(mcia_pl, module, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0,
MLXSW_REG_MCIA_PAGE0_LO,
off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_HIGH);
@@ -242,24 +289,31 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
}
int mlxsw_env_get_module_info(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
u8 module_rev_id, module_id, diag_mon;
unsigned int read_size;
int err;
- err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev,
"EEPROM is not equipped on port module type");
return err;
}
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
- module_info, false, &read_size);
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, module, 0,
+ offset, module_info, false,
+ &read_size);
if (err)
return err;
@@ -288,9 +342,10 @@ int mlxsw_env_get_module_info(struct net_device *netdev,
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
/* Verify if transceiver provides diagnostic monitoring page */
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
- SFP_DIAGMON, 1, &diag_mon,
- false, &read_size);
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, SFP_DIAGMON, 1,
+ &diag_mon, false,
+ &read_size);
if (err)
return err;
@@ -329,9 +384,11 @@ int mlxsw_env_get_module_info(struct net_device *netdev,
EXPORT_SYMBOL(mlxsw_env_get_module_info);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int offset = ee->offset;
unsigned int read_size;
bool qsfp, cmis;
@@ -341,14 +398,21 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
if (!ee->len)
return -EINVAL;
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
memset(data, 0, ee->len);
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module,
+ &qsfp, &cmis);
if (err)
return err;
while (i < ee->len) {
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, offset,
ee->len - i, data + i,
qsfp, &read_size);
if (err) {
@@ -394,15 +458,23 @@ static int mlxsw_env_mcia_status_process(const char *mcia_pl,
}
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u32 bytes_read = 0;
u16 device_addr;
int err;
- err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot read EEPROM of module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
return err;
@@ -419,7 +491,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
size = min_t(u8, page->length - bytes_read,
MLXSW_REG_MCIA_EEPROM_SIZE);
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page,
device_addr + bytes_read, size,
page->i2c_address);
mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
@@ -443,20 +515,23 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
-static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
}
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
u32 req = *flags;
int err;
@@ -464,28 +539,34 @@ int mlxsw_env_reset_module(struct net_device *netdev,
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
return 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot reset module on an inactive line card\n");
+ return -EIO;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev, "Reset module is not supported on port module type\n");
goto out;
}
- if (mlxsw_env->module_info[module].num_ports_up) {
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->num_ports_up) {
netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
err = -EINVAL;
goto out;
}
- if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+ if (module_info->num_ports_mapped > 1 &&
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
err = -EINVAL;
goto out;
}
- err = mlxsw_env_module_reset(mlxsw_core, module);
+ err = mlxsw_env_module_reset(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev, "Failed to reset module\n");
goto out;
@@ -494,32 +575,39 @@ int mlxsw_env_reset_module(struct net_device *netdev,
*flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_reset_module);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
char mcion_pl[MLXSW_REG_MCION_LEN];
u32 status_bits;
- int err;
+ int err = 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type");
goto out;
}
- params->policy = mlxsw_env->module_info[module].power_mode_policy;
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ params->policy = module_info->power_mode_policy;
+
+ /* Avoid accessing an inactive line card, as it will result in an error. */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out;
- mlxsw_reg_mcion_pack(mcion_pl, module);
+ mlxsw_reg_mcion_pack(mcion_pl, slot_index, module);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
@@ -536,18 +624,18 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool enable)
+ u8 slot_index, u8 module, bool enable)
{
enum mlxsw_reg_pmaos_admin_status admin_status;
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
@@ -557,12 +645,13 @@ static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
}
static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power)
+ u8 slot_index, u8 module,
+ bool low_power)
{
u16 eeprom_override_mask, eeprom_override;
char pmmp_pl[MLXSW_REG_PMMP_LEN];
- mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module);
mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
/* Mask all the bits except low power mode. */
eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
@@ -575,24 +664,34 @@ static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
}
static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power,
+ u8 slot_index, u8 module,
+ bool low_power,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int err;
- err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ /* Avoid accessing an inactive line card, as it will result in an error.
+ * Cached configuration will be applied by mlxsw_env_got_active() when
+ * line card becomes active.
+ */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
return err;
}
- err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ err = mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ low_power);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
goto err_module_low_power_set;
}
- err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
goto err_module_enable_set;
@@ -601,67 +700,84 @@ static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
return 0;
err_module_enable_set:
- mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+ mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ !low_power);
err_module_low_power_set:
- mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
return err;
}
-int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
- enum ethtool_module_power_mode_policy policy,
- struct netlink_ext_ack *extack)
+static int
+mlxsw_env_set_module_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
{
- struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
bool low_power;
int err = 0;
- if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
- policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&mlxsw_env->module_info_lock);
-
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Power mode set is not supported on port module type");
goto out;
}
- if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy == policy)
goto out;
/* If any ports are up, we are already in high power mode. */
- if (mlxsw_env->module_info[module].num_ports_up)
+ if (module_info->num_ports_up)
goto out_set_policy;
low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
- extack);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ low_power, extack);
if (err)
goto out;
out_set_policy:
- mlxsw_env->module_info[module].power_mode_policy = policy;
+ module_info->power_mode_policy = policy;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, slot_index,
+ module, policy, extack);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
return err;
}
EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
- u8 module,
+ u8 slot_index, u8 module,
bool *p_has_temp_sensor)
{
char mtbr_pl[MLXSW_REG_MTBR_LEN];
u16 temp;
int err;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ mlxsw_reg_mtbr_pack(mtbr_pl, slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
if (err)
return err;
@@ -681,13 +797,15 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
return 0;
}
-static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
- u16 sensor_index, bool enable)
+static int
+mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u16 sensor_index, bool enable)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
enum mlxsw_reg_mtmp_tee tee;
int err, threshold_hi;
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -695,6 +813,7 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
if (enable) {
err = mlxsw_env_module_temp_thresholds_get(mlxsw_core,
+ slot_index,
sensor_index -
MLXSW_REG_MTMP_MODULE_INDEX_MIN,
SFP_TEMP_HIGH_WARN,
@@ -721,14 +840,16 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
}
-static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err, sensor_index;
bool has_temp_sensor;
- for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
- err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
- &has_temp_sensor);
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index,
+ i, &has_temp_sensor);
if (err)
return err;
@@ -736,7 +857,8 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
continue;
sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true);
+ err = mlxsw_env_temp_event_set(mlxsw_core, slot_index,
+ sensor_index, true);
if (err)
return err;
}
@@ -753,6 +875,7 @@ struct mlxsw_env_module_temp_warn_event {
static void mlxsw_env_mtwe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
int i, sensor_warning;
bool is_overheat;
@@ -761,7 +884,7 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- for (i = 0; i < mlxsw_env->module_count; i++) {
+ for (i = 0; i < mlxsw_env->max_module_count; i++) {
/* 64-127 of sensor_index are mapped to the port modules
* sequentially (module 0 is mapped to sensor_index 64,
* module 1 to sensor_index 65 and so on)
@@ -769,9 +892,10 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
sensor_warning =
mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
- mutex_lock(&mlxsw_env->module_info_lock);
- is_overheat =
- mlxsw_env->module_info[i].is_overheat;
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ /* MTWE only supports main board. */
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core, 0, i);
+ is_overheat = module_info->is_overheat;
if ((is_overheat && sensor_warning) ||
(!is_overheat && !sensor_warning)) {
@@ -779,21 +903,21 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
* warning OR current state in "no warning" and MTWE
* does not report warning.
*/
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
continue;
} else if (is_overheat && !sensor_warning) {
/* MTWE reports "no warning", turn is_overheat off.
*/
- mlxsw_env->module_info[i].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
} else {
/* Current state is "no warning" and MTWE reports
* "warning", increase the counter and turn is_overheat
* on.
*/
- mlxsw_env->module_info[i].is_overheat = true;
- mlxsw_env->module_info[i].module_overheat_counter++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = true;
+ module_info->module_overheat_counter++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
}
@@ -837,6 +961,7 @@ static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
struct mlxsw_env_module_plug_unplug_event {
struct mlxsw_env *mlxsw_env;
+ u8 slot_index;
u8 module;
struct work_struct work;
};
@@ -844,6 +969,7 @@ struct mlxsw_env_module_plug_unplug_event {
static void mlxsw_env_pmpe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_plug_unplug_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
bool has_temp_sensor;
u16 sensor_index;
@@ -853,11 +979,16 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[event->module].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core,
+ event->slot_index,
+ event->module);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
- err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core,
+ event->slot_index,
+ event->module,
&has_temp_sensor);
/* Do not disable events on modules without sensors or faulty sensors
* because FW returns errors.
@@ -869,7 +1000,8 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
goto out;
sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true);
+ mlxsw_env_temp_event_set(mlxsw_env->core, event->slot_index,
+ sensor_index, true);
out:
kfree(event);
@@ -879,12 +1011,14 @@ static void
mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
void *priv)
{
+ u8 slot_index = mlxsw_reg_pmpe_slot_index_get(pmpe_pl);
struct mlxsw_env_module_plug_unplug_event *event;
enum mlxsw_reg_pmpe_module_status module_status;
u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl);
struct mlxsw_env *mlxsw_env = priv;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ if (WARN_ON_ONCE(module >= mlxsw_env->max_module_count ||
+ slot_index >= mlxsw_env->num_of_slots))
return;
module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl);
@@ -896,6 +1030,7 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
return;
event->mlxsw_env = mlxsw_env;
+ event->slot_index = slot_index;
event->module = module;
INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
mlxsw_core_schedule_work(&event->work);
@@ -923,14 +1058,16 @@ mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
}
static int
-mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err;
- for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, i);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i);
mlxsw_reg_pmaos_e_set(pmaos_pl,
MLXSW_REG_PMAOS_E_GENERATE_EVENT);
mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
@@ -942,146 +1079,330 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
}
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter)
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- *p_counter = mlxsw_env->module_info[module].module_overheat_counter;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ *p_counter = module_info->module_overheat_counter;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return 0;
}
EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_map);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped--;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
int err = 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_inc;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_inc;
/* Transition to high power mode following first port using the module
* being put administratively up.
*/
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
- NULL);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ false, NULL);
if (err)
goto out_unlock;
out_inc:
- mlxsw_env->module_info[module].num_ports_up++;
+ module_info->num_ports_up++;
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_module_port_up);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- mlxsw_env->module_info[module].num_ports_up--;
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_up--;
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_unlock;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_unlock;
/* Transition to low power mode following last port using the module
* being put administratively down.
*/
- __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+ __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, true,
+ NULL);
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_down);
+static int mlxsw_env_line_cards_alloc(struct mlxsw_env *env)
+{
+ struct mlxsw_env_module_info *module_info;
+ int i, j;
+
+ for (i = 0; i < env->num_of_slots; i++) {
+ env->line_cards[i] = kzalloc(struct_size(env->line_cards[i],
+ module_info,
+ env->max_module_count),
+ GFP_KERNEL);
+ if (!env->line_cards[i])
+ goto kzalloc_err;
+
+ /* Firmware defaults to high power mode policy where modules
+ * are transitioned to high power mode following plug-in.
+ */
+ for (j = 0; j < env->max_module_count; j++) {
+ module_info = &env->line_cards[i]->module_info[j];
+ module_info->power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+ }
+ }
+
+ return 0;
+
+kzalloc_err:
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+ return -ENOMEM;
+}
+
+static void mlxsw_env_line_cards_free(struct mlxsw_env *env)
+{
+ int i = env->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+}
+
+static int
+mlxsw_env_module_event_enable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+ int err;
+
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_env->core,
+ slot_index);
+ if (err)
+ return err;
+
+ err = mlxsw_env_module_temp_event_enable(mlxsw_env->core, slot_index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+mlxsw_env_module_event_disable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+}
+
static int
-mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core)
+mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i;
- for (i = 0; i < mlxsw_env->module_count; i++) {
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ struct mlxsw_env_module_info *module_info;
char pmtm_pl[MLXSW_REG_PMTM_LEN];
int err;
- mlxsw_reg_pmtm_pack(pmtm_pl, 0, i);
+ mlxsw_reg_pmtm_pack(pmtm_pl, slot_index, i);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
if (err)
return err;
- mlxsw_env->module_info[i].type =
- mlxsw_reg_pmtm_module_type_get(pmtm_pl);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index,
+ i);
+ module_info->type = mlxsw_reg_pmtm_module_type_get(pmtm_pl);
}
return 0;
}
-int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
+static void
+mlxsw_env_linecard_modules_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_env *env,
+ u8 slot_index)
{
+ int i;
+
+ for (i = 0; i < env->line_cards[slot_index]->module_count; i++) {
+ enum ethtool_module_power_mode_policy policy;
+ struct mlxsw_env_module_info *module_info;
+ struct netlink_ext_ack extack;
+ int err;
+
+ module_info = &env->line_cards[slot_index]->module_info[i];
+ policy = module_info->power_mode_policy;
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core,
+ slot_index, i,
+ policy, &extack);
+ if (err)
+ dev_err(env->bus_info->dev, "%s\n", extack._msg);
+ }
+}
+
+static void
+mlxsw_env_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int err;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, slot_index);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &mlxsw_env->line_cards[slot_index]->module_count,
+ NULL);
+
+ err = mlxsw_env_module_event_enable(mlxsw_env, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to enable port module events for line card in slot %d\n",
+ slot_index);
+ goto err_mlxsw_env_module_event_enable;
+ }
+ err = mlxsw_env_module_type_set(mlxsw_env->core, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to set modules' type for line card in slot %d\n",
+ slot_index);
+ goto err_type_set;
+ }
+
+ mlxsw_env->line_cards[slot_index]->active = true;
+ /* Apply power mode policy. */
+ mlxsw_env_linecard_modules_power_mode_apply(mlxsw_core, mlxsw_env,
+ slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return;
+
+err_type_set:
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+err_mlxsw_env_module_event_enable:
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static void
+mlxsw_env_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+ mlxsw_env->line_cards[slot_index]->active = false;
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+ mlxsw_env->line_cards[slot_index]->module_count = 0;
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
+ .got_active = mlxsw_env_got_active,
+ .got_inactive = mlxsw_env_got_inactive,
+};
+
+int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env)
+{
+ u8 module_count, num_of_slots, max_module_count;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
- u8 module_count;
- int i, err;
+ int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count,
+ &num_of_slots);
+ /* If the system is modular, get the maximum number of modules per-slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ max_module_count = num_of_slots ?
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) :
+ module_count;
- env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL);
+ env = kzalloc(struct_size(env, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!env)
return -ENOMEM;
- /* Firmware defaults to high power mode policy where modules are
- * transitioned to high power mode following plug-in.
- */
- for (i = 0; i < module_count; i++)
- env->module_info[i].power_mode_policy =
- ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
-
- mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
- env->module_count = module_count;
+ env->bus_info = bus_info;
+ env->num_of_slots = num_of_slots + 1;
+ env->max_module_count = max_module_count;
+ err = mlxsw_env_line_cards_alloc(env);
+ if (err)
+ goto err_mlxsw_env_line_cards_alloc;
+
+ mutex_init(&env->line_cards_lock);
*p_env = env;
+ err = mlxsw_linecards_event_ops_register(env->core,
+ &mlxsw_env_event_ops, env);
+ if (err)
+ goto err_linecards_event_ops_register;
+
err = mlxsw_env_temp_warn_event_register(mlxsw_core);
if (err)
goto err_temp_warn_event_register;
@@ -1090,38 +1411,54 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (err)
goto err_module_plug_event_register;
- err = mlxsw_env_module_oper_state_event_enable(mlxsw_core);
- if (err)
- goto err_oper_state_event_enable;
-
- err = mlxsw_env_module_temp_event_enable(mlxsw_core);
+ /* Set 'module_count' only for main board. Actual count for line card
+ * is to be set after line card is activated.
+ */
+ env->line_cards[0]->module_count = num_of_slots ? 0 : module_count;
+ /* Enable events only for main board. Line card events are to be
+ * configured only after line card is activated. Before that, access to
+ * modules on line cards is not allowed.
+ */
+ err = mlxsw_env_module_event_enable(env, 0);
if (err)
- goto err_temp_event_enable;
+ goto err_mlxsw_env_module_event_enable;
- err = mlxsw_env_module_type_set(mlxsw_core);
+ err = mlxsw_env_module_type_set(mlxsw_core, 0);
if (err)
goto err_type_set;
+ env->line_cards[0]->active = true;
+
return 0;
err_type_set:
-err_temp_event_enable:
-err_oper_state_event_enable:
+ mlxsw_env_module_event_disable(env, 0);
+err_mlxsw_env_module_event_enable:
mlxsw_env_module_plug_event_unregister(env);
err_module_plug_event_register:
mlxsw_env_temp_warn_event_unregister(env);
err_temp_warn_event_register:
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+err_linecards_event_ops_register:
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
+err_mlxsw_env_line_cards_alloc:
kfree(env);
return err;
}
void mlxsw_env_fini(struct mlxsw_env *env)
{
+ env->line_cards[0]->active = false;
+ mlxsw_env_module_event_disable(env, 0);
mlxsw_env_module_plug_event_unregister(env);
/* Make sure there is no more event work scheduled. */
mlxsw_core_flush_owq();
mlxsw_env_temp_warn_event_unregister(env);
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
kfree(env);
}
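
A hedged sketch (not part of the patch) of the data layout the core_env.c conversion introduces: slot 0 is the main board, slots 1..N are line cards, every access goes through line_cards_lock, and inactive line cards are skipped. The walk below is illustrative and would have to live inside core_env.c, since struct mlxsw_env and struct mlxsw_env_line_card are file-local; the field and lock names follow the patch.

static void mlxsw_env_example_walk(struct mlxsw_env *mlxsw_env)
{
	u8 slot_index;
	int i;

	mutex_lock(&mlxsw_env->line_cards_lock);
	for (slot_index = 0; slot_index < mlxsw_env->num_of_slots;
	     slot_index++) {
		struct mlxsw_env_line_card *line_card =
			mlxsw_env->line_cards[slot_index];

		/* Modules on a line card may only be accessed once the
		 * line card reported active.
		 */
		if (!line_card->active)
			continue;

		for (i = 0; i < line_card->module_count; i++) {
			/* &line_card->module_info[i] is the per-module
			 * state (type, power mode policy, counters).
			 */
		}
	}
	mutex_unlock(&mlxsw_env->line_cards_lock);
}
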
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index ec6564e5d2ee..a197e3ae069c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -9,49 +9,60 @@
struct ethtool_modinfo;
struct ethtool_eeprom;
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp);
+int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core,
+ u8 slot_index, int module, int off,
+ int *temp);
int mlxsw_env_get_module_info(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data);
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module,
- u32 *flags);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack);
int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
enum ethtool_module_power_mode_policy policy,
struct netlink_ext_ack *extack);
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter);
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
+int mlxsw_env_init(struct mlxsw_core *core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env);
void mlxsw_env_fini(struct mlxsw_env *env);
#endif
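
As a hedged illustration of the updated core_env API (every entry point now takes the slot index ahead of the module index), a caller reading one page of a module on the main board might look like the sketch below; the buffer size, page selection, module index and I2C address are illustrative, not taken from the patch.

#include <linux/ethtool.h>
#include "core_env.h"

static int mlxsw_example_read_page0(struct mlxsw_core *mlxsw_core,
				    struct netlink_ext_ack *extack)
{
	u8 buf[32] = {};
	struct ethtool_module_eeprom page = {
		.offset = 0,
		.length = sizeof(buf),
		.page = 0,
		.bank = 0,
		.i2c_address = 0x50,
		.data = buf,
	};

	/* Slot 0 is the main board; module index 4 is arbitrary. */
	return mlxsw_env_get_module_eeprom_by_page(mlxsw_core, 0, 4, &page,
						   extack);
}
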
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 8b170ad92302..70735068cf29 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -19,6 +19,7 @@
#define MLXSW_HWMON_ATTR_PER_SENSOR 3
#define MLXSW_HWMON_ATTR_PER_MODULE 7
#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+#define MLXSW_HWMON_DEV_NAME_LEN_MAX 16
#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
@@ -27,7 +28,7 @@
struct mlxsw_hwmon_attr {
struct device_attribute dev_attr;
- struct mlxsw_hwmon *hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev;
unsigned int type_index;
char name[32];
};
@@ -40,9 +41,9 @@ static int mlxsw_hwmon_get_attr_index(int index, int count)
return index;
}
-struct mlxsw_hwmon {
- struct mlxsw_core *core;
- const struct mlxsw_bus_info *bus_info;
+struct mlxsw_hwmon_dev {
+ char name[MLXSW_HWMON_DEV_NAME_LEN_MAX];
+ struct mlxsw_hwmon *hwmon;
struct device *hwmon_dev;
struct attribute_group group;
const struct attribute_group *groups[2];
@@ -51,6 +52,14 @@ struct mlxsw_hwmon {
unsigned int attrs_count;
u8 sensor_count;
u8 module_sensor_max;
+ u8 slot_index;
+ bool active;
+};
+
+struct mlxsw_hwmon {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ struct mlxsw_hwmon_dev line_cards[];
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -59,14 +68,16 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp, index;
int err;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -82,14 +93,16 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp_max, index;
int err;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -105,8 +118,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
- char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned long val;
int index;
int err;
@@ -118,8 +132,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
return -EINVAL;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -140,7 +155,8 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsm_pl[MLXSW_REG_MFSM_LEN];
int err;
@@ -159,7 +175,8 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char fore_pl[MLXSW_REG_FORE_LEN];
bool fault;
int err;
@@ -180,7 +197,8 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
int err;
@@ -200,7 +218,8 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
unsigned long val;
int err;
@@ -226,14 +245,16 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(dev, "Failed to query module temperature\n");
@@ -263,15 +284,16 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
u8 module, fault;
u16 temp;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
if (err) {
dev_err(dev, "Failed to query module temperature sensor\n");
@@ -305,13 +327,16 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_WARN, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_WARN,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -339,13 +364,16 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_ALARM, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_ALARM,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -387,9 +415,9 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
int index = mlxsw_hwmon_attr->type_index -
- mlxsw_hwmon->module_sensor_max + 1;
+ mlxsw_hwmon_dev->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
}
@@ -458,14 +486,15 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
};
-static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
+static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev,
enum mlxsw_hwmon_attr_type attr_type,
- unsigned int type_index, unsigned int num) {
+ unsigned int type_index, unsigned int num)
+{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr;
unsigned int attr_index;
- attr_index = mlxsw_hwmon->attrs_count;
- mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index];
+ attr_index = mlxsw_hwmon_dev->attrs_count;
+ mlxsw_hwmon_attr = &mlxsw_hwmon_dev->hwmon_attrs[attr_index];
switch (attr_type) {
case MLXSW_HWMON_ATTR_TYPE_TEMP:
@@ -565,16 +594,17 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
}
mlxsw_hwmon_attr->type_index = type_index;
- mlxsw_hwmon_attr->hwmon = mlxsw_hwmon;
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev = mlxsw_hwmon_dev;
mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name;
sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr);
- mlxsw_hwmon->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
- mlxsw_hwmon->attrs_count++;
+ mlxsw_hwmon_dev->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
+ mlxsw_hwmon_dev->attrs_count++;
}
-static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
int i;
int err;
@@ -584,10 +614,12 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n");
return err;
}
- mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
- for (i = 0; i < mlxsw_hwmon->sensor_count; i++) {
+ mlxsw_hwmon_dev->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
+ for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) {
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl,
+ mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp),
mtmp_pl);
@@ -602,18 +634,19 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
i);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i);
}
return 0;
}
-static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0};
enum mlxsw_reg_mfcr_pwm_frequency freq;
unsigned int type_index;
@@ -631,10 +664,10 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) {
if (tacho_active & BIT(type_index)) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
type_index, num);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
type_index, num++);
}
@@ -642,54 +675,55 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) {
if (pwm_active & BIT(type_index))
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_PWM,
type_index, num++);
}
return 0;
}
-static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 module_sensor_max;
int i, err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &module_sensor_max);
+ &module_sensor_max, NULL);
/* Add extra attributes for module temperature. Sensor index is
* assigned to sensor_count value, while all indexed before
* sensor_count are already utilized by the sensors connected through
* mtmp register by mlxsw_hwmon_temp_init().
*/
- mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count +
- module_sensor_max;
- for (i = mlxsw_hwmon->sensor_count;
- i < mlxsw_hwmon->module_sensor_max; i++) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_dev->module_sensor_max = mlxsw_hwmon_dev->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon_dev->sensor_count;
+ i < mlxsw_hwmon_dev->module_sensor_max; i++) {
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
i, i);
}
@@ -697,8 +731,9 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
-static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
enum mlxsw_reg_mgpir_device_type device_type;
int index, max_index, sensor_index;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
@@ -706,22 +741,24 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
u8 gbox_num;
int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL,
+ NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- index = mlxsw_hwmon->module_sensor_max;
- max_index = mlxsw_hwmon->module_sensor_max + gbox_num;
+ index = mlxsw_hwmon_dev->module_sensor_max;
+ max_index = mlxsw_hwmon_dev->module_sensor_max + gbox_num;
while (index < max_index) {
- sensor_index = index % mlxsw_hwmon->module_sensor_max +
+ sensor_index = index % mlxsw_hwmon_dev->module_sensor_max +
MLXSW_REG_MTMP_GBOX_INDEX_MIN;
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ sensor_index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -729,15 +766,15 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
sensor_index);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP,
- index, index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP, index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
index, index);
index++;
@@ -746,51 +783,144 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
+static void
+mlxsw_hwmon_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+ struct device *dev;
+ int err;
+
+ dev = hwmon->bus_info->dev;
+ linecard = &hwmon->line_cards[slot_index];
+ if (linecard->active)
+ return;
+ /* For the main board, module sensor indexes start from 1; sensor index
+ * 0 is used for the ASIC. Use the same numbering for line cards.
+ */
+ linecard->sensor_count = 1;
+ linecard->slot_index = slot_index;
+ linecard->hwmon = hwmon;
+ err = mlxsw_hwmon_module_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_hwmon_gearbox_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card gearboxes in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->groups[0] = &linecard->group;
+ linecard->group.attrs = linecard->attrs;
+ sprintf(linecard->name, "%s#%02u", "linecard", slot_index);
+ linecard->hwmon_dev =
+ hwmon_device_register_with_groups(dev, linecard->name,
+ linecard, linecard->groups);
+ if (IS_ERR(linecard->hwmon_dev)) {
+ dev_err(dev, "Failed to register hwmon objects for line card in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->active = true;
+}
+
+static void
+mlxsw_hwmon_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+
+ linecard = &hwmon->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ hwmon_device_unregister(linecard->hwmon_dev);
+ /* Reset attributes counter */
+ linecard->attrs_count = 0;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_hwmon_event_ops = {
+ .got_active = mlxsw_hwmon_got_active,
+ .got_inactive = mlxsw_hwmon_got_inactive,
+};
+
int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct mlxsw_hwmon **p_hwmon)
{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_hwmon *mlxsw_hwmon;
struct device *hwmon_dev;
+ u8 num_of_slots;
int err;
- mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
+ mlxsw_hwmon = kzalloc(struct_size(mlxsw_hwmon, line_cards,
+ num_of_slots + 1), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
+
mlxsw_hwmon->core = mlxsw_core;
mlxsw_hwmon->bus_info = mlxsw_bus_info;
+ mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon;
+ mlxsw_hwmon->line_cards[0].slot_index = 0;
- err = mlxsw_hwmon_temp_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_init;
- err = mlxsw_hwmon_fans_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_fans_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_fans_init;
- err = mlxsw_hwmon_module_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_module_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_module_init;
- err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_gearbox_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_gearbox_init;
- mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
- mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
+ mlxsw_hwmon->line_cards[0].groups[0] = &mlxsw_hwmon->line_cards[0].group;
+ mlxsw_hwmon->line_cards[0].group.attrs = mlxsw_hwmon->line_cards[0].attrs;
hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw", mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ "mlxsw",
+ &mlxsw_hwmon->line_cards[0],
+ mlxsw_hwmon->line_cards[0].groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
}
- mlxsw_hwmon->hwmon_dev = hwmon_dev;
+ err = mlxsw_linecards_event_ops_register(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops,
+ mlxsw_hwmon);
+ if (err)
+ goto err_linecards_event_ops_register;
+
+ mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev;
+ mlxsw_hwmon->line_cards[0].active = true;
*p_hwmon = mlxsw_hwmon;
return 0;
+err_linecards_event_ops_register:
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
err_hwmon_register:
err_temp_gearbox_init:
err_temp_module_init:
@@ -802,6 +932,9 @@ err_temp_init:
void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
{
- hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ mlxsw_hwmon->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops, mlxsw_hwmon);
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
kfree(mlxsw_hwmon);
}
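A minimal sketch of how a consumer hooks into the line card event ops introduced in core_linecards.c below; the hwmon hunks above and the thermal hunks later in this patch follow the same pattern. The example_* names are hypothetical, while struct mlxsw_linecards_event_ops, mlxsw_linecards_event_ops_register() and mlxsw_linecards_event_ops_unregister() are the interfaces this patch adds:

static void example_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
			       void *priv)
{
	/* Set up per-line-card objects for the slot that just became active. */
}

static void example_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
				 void *priv)
{
	/* Tear down whatever example_got_active() created for this slot. */
}

static struct mlxsw_linecards_event_ops example_event_ops = {
	.got_active	= example_got_active,
	.got_inactive	= example_got_inactive,
};

static int example_init(struct mlxsw_core *mlxsw_core, void *priv)
{
	/* priv is handed back to both callbacks. */
	return mlxsw_linecards_event_ops_register(mlxsw_core,
						  &example_event_ops, priv);
}

static void example_fini(struct mlxsw_core *mlxsw_core, void *priv)
{
	mlxsw_linecards_event_ops_unregister(mlxsw_core,
					     &example_event_ops, priv);
}

If a line card is already active when the consumer registers, got_active() is replayed for that slot by mlxsw_linecards_event_ops_register_call(), so late registration does not miss earlier activations.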
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
new file mode 100644
index 000000000000..2abd31a62776
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -0,0 +1,1373 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+
+struct mlxsw_linecard_ini_file {
+ __le16 size;
+ union {
+ u8 data[0];
+ struct {
+ __be16 hw_revision;
+ __be16 ini_version;
+ u8 __dontcare[3];
+ u8 type;
+ u8 name[20];
+ } format;
+ };
+};
+
+struct mlxsw_linecard_types_info {
+ struct mlxsw_linecard_ini_file **ini_files;
+ unsigned int count;
+ size_t data_size;
+ char *data;
+};
+
+#define MLXSW_LINECARD_STATUS_EVENT_TO (10 * MSEC_PER_SEC)
+
+static void
+mlxsw_linecard_status_event_to_schedule(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ linecard->status_event_type_to = status_event_type;
+ mlxsw_core_schedule_dw(&linecard->status_event_to_dw,
+ msecs_to_jiffies(MLXSW_LINECARD_STATUS_EVENT_TO));
+}
+
+static void
+mlxsw_linecard_status_event_done(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ if (linecard->status_event_type_to == status_event_type)
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+}
+
+static const char *
+mlxsw_linecard_types_lookup(struct mlxsw_linecards *linecards, u8 card_type)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ int i;
+
+ types_info = linecards->types_info;
+ if (!types_info)
+ return NULL;
+ for (i = 0; i < types_info->count; i++) {
+ ini_file = linecards->types_info->ini_files[i];
+ if (ini_file->format.type == card_type)
+ return ini_file->format.name;
+ }
+ return NULL;
+}
+
+static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_name_pack(mddq_pl, linecard->slot_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return ERR_PTR(err);
+ mlxsw_reg_mddq_slot_name_unpack(mddq_pl, linecard->name);
+ return linecard->name;
+}
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+};
+
+struct mlxsw_linecard_device {
+ struct list_head list;
+ u8 index;
+ struct mlxsw_linecard *linecard;
+ struct devlink_linecard_device *devlink_device;
+ struct mlxsw_linecard_device_info info;
+};
+
+static struct mlxsw_linecard_device *
+mlxsw_linecard_device_lookup(struct mlxsw_linecard *linecard, u8 index)
+{
+ struct mlxsw_linecard_device *device;
+
+ list_for_each_entry(device, &linecard->device_list, list)
+ if (device->index == index)
+ return device;
+ return NULL;
+}
+
+static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ u8 device_index, bool flash_owner)
+{
+ struct mlxsw_linecard_device *device;
+ int err;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+ device->index = device_index;
+ device->linecard = linecard;
+
+ device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard,
+ device_index, device);
+ if (IS_ERR(device->devlink_device)) {
+ err = PTR_ERR(device->devlink_device);
+ goto err_devlink_linecard_device_attach;
+ }
+
+ list_add_tail(&device->list, &linecard->device_list);
+ return 0;
+
+err_devlink_linecard_device_attach:
+ kfree(device);
+ return err;
+}
+
+static void mlxsw_linecard_device_detach(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct mlxsw_linecard_device *device)
+{
+ list_del(&device->list);
+ devlink_linecard_device_destroy(linecard->devlink_linecard,
+ device->devlink_device);
+ kfree(device);
+}
+
+static void mlxsw_linecard_devices_detach(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &linecard->device_list, list)
+ mlxsw_linecard_device_detach(mlxsw_core, linecard, device);
+}
+
+static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ u8 msg_seq = 0;
+ int err;
+
+ do {
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index, NULL,
+ NULL, NULL);
+ if (!data_valid)
+ break;
+ err = mlxsw_linecard_device_attach(mlxsw_core, linecard,
+ device_index, flash_owner);
+ if (err)
+ goto rollback;
+ } while (msg_seq);
+
+ return 0;
+
+rollback:
+ mlxsw_linecard_devices_detach(linecard);
+ return err;
+}
+
+static void mlxsw_linecard_device_update(struct mlxsw_linecard *linecard,
+ u8 device_index,
+ struct mlxsw_linecard_device_info *info)
+{
+ struct mlxsw_linecard_device *device;
+
+ device = mlxsw_linecard_device_lookup(linecard, device_index);
+ if (!device)
+ return;
+ device->info = *info;
+}
+
+static int mlxsw_linecard_devices_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, NULL,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ mlxsw_linecard_device_update(linecard, device_index, &info);
+ } while (msg_seq);
+
+ return 0;
+}
+
+static int
+mlxsw_linecard_device_info_get(struct devlink_linecard_device *devlink_linecard_device,
+ void *priv, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_device *device = priv;
+ struct mlxsw_linecard_device_info *info;
+ struct mlxsw_linecard *linecard;
+ char buf[32];
+
+ linecard = device->linecard;
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ mutex_unlock(&linecard->lock);
+ return 0;
+ }
+
+ info = &device->info;
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ mutex_unlock(&linecard->lock);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
+{
+ linecard->provisioned = false;
+ linecard->ready = false;
+ linecard->active = false;
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_provision_fail(linecard->devlink_linecard);
+}
+
+struct mlxsw_linecards_event_ops_item {
+ struct list_head list;
+ const struct mlxsw_linecards_event_ops *event_ops;
+ void *priv;
+};
+
+static void
+mlxsw_linecard_event_op_call(struct mlxsw_linecard *linecard,
+ mlxsw_linecards_event_op_t *op, void *priv)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+
+ if (!op)
+ return;
+ op(mlxsw_core, linecard->slot_index, priv);
+}
+
+static void
+mlxsw_linecard_active_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecard_inactive_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecards_event_ops_register_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+static void
+mlxsw_linecards_event_ops_unregister_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item;
+
+ if (!linecards)
+ return 0;
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->event_ops = ops;
+ item->priv = priv;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_add_tail(&item->list, &linecards->event_ops_list);
+ mutex_unlock(&linecards->event_ops_list_lock);
+ mlxsw_linecards_event_ops_register_call(linecards, item);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_register);
+
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item, *tmp;
+ bool found = false;
+
+ if (!linecards)
+ return;
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry_safe(item, tmp, &linecards->event_ops_list, list) {
+ if (item->event_ops == ops && item->priv == priv) {
+ list_del(&item->list);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&linecards->event_ops_list_lock);
+
+ if (!found)
+ return;
+ mlxsw_linecards_event_ops_unregister_call(linecards, item);
+ kfree(item);
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
+
+static int
+mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
+ u16 hw_revision, u16 ini_version)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ const char *type;
+ int err;
+
+ type = mlxsw_linecard_types_lookup(linecards, card_type);
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ if (!type) {
+ /* It is possible for a line card to be provisioned before
+ * driver initialization. Due to a missing INI bundle file
+ * or an outdated one, the queried card's type might not
+ * be recognized by the driver. In this case, try to query
+ * the card's name from the device.
+ */
+ type = mlxsw_linecard_type_name(linecard);
+ if (IS_ERR(type)) {
+ mlxsw_linecard_provision_fail(linecard);
+ return PTR_ERR(type);
+ }
+ }
+ err = mlxsw_linecard_devices_attach(linecard);
+ if (err) {
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+ linecard->provisioned = true;
+ linecard->hw_revision = hw_revision;
+ linecard->ini_version = ini_version;
+ devlink_linecard_provision_set(linecard->devlink_linecard, type);
+ return 0;
+}
+
+static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ linecard->provisioned = false;
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_provision_clear(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = true;
+ return 0;
+}
+
+static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = false;
+ return 0;
+}
+
+static int mlxsw_linecard_active_set(struct mlxsw_linecard *linecard)
+{
+ int err;
+
+ err = mlxsw_linecard_devices_update(linecard);
+ if (err)
+ return err;
+
+ mlxsw_linecard_active_ops_call(linecard);
+ linecard->active = true;
+ devlink_linecard_activate(linecard->devlink_linecard);
+ return 0;
+}
+
+static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_inactive_ops_call(linecard);
+ linecard->active = false;
+ devlink_linecard_deactivate(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard,
+ const char *mddq_pl)
+{
+ enum mlxsw_reg_mddq_slot_info_ready ready;
+ bool provisioned, sr_valid, active;
+ u16 ini_version, hw_revision;
+ u8 slot_index, card_type;
+ int err = 0;
+
+ mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot_index, &provisioned,
+ &sr_valid, &ready, &active,
+ &hw_revision, &ini_version,
+ &card_type);
+
+ if (linecard) {
+ if (WARN_ON(slot_index != linecard->slot_index))
+ return -EINVAL;
+ } else {
+ if (WARN_ON(slot_index > linecards->count))
+ return -EINVAL;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ }
+
+ mutex_lock(&linecard->lock);
+
+ if (provisioned && linecard->provisioned != provisioned) {
+ err = mlxsw_linecard_provision_set(linecard, card_type,
+ hw_revision, ini_version);
+ if (err)
+ goto out;
+ }
+
+ if (ready == MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && !linecard->ready) {
+ err = mlxsw_linecard_ready_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (active && linecard->active != active) {
+ err = mlxsw_linecard_active_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!active && linecard->active != active)
+ mlxsw_linecard_active_clear(linecard);
+
+ if (ready != MLXSW_REG_MDDQ_SLOT_INFO_READY_READY &&
+ linecard->ready) {
+ err = mlxsw_linecard_ready_clear(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!provisioned && linecard->provisioned != provisioned)
+ mlxsw_linecard_provision_clear(linecard);
+
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, false);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+
+ return mlxsw_linecard_status_process(linecards, linecard, mddq_pl);
+}
+
+static const char * const mlxsw_linecard_status_event_type_name[] = {
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision",
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision",
+};
+
+static void mlxsw_linecard_status_event_to_work(struct work_struct *work)
+{
+ struct mlxsw_linecard *linecard =
+ container_of(work, struct mlxsw_linecard,
+ status_event_to_dw.work);
+
+ mutex_lock(&linecard->lock);
+ dev_err(linecard->linecards->bus_info->dev, "linecard %u: Timeout reached waiting on %s status event",
+ linecard->slot_index,
+ mlxsw_linecard_status_event_type_name[linecard->status_event_type_to]);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int __mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard)
+{
+ dev_info(linecard->linecards->bus_info->dev, "linecard %u: Clearing FSM state error",
+ linecard->slot_index);
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS, false);
+ return mlxsw_reg_write(linecard->linecards->mlxsw_core,
+ MLXSW_REG(mbct), linecard->mbct_pl);
+}
+
+static int mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_fsm_state fsm_state)
+{
+ if (fsm_state != MLXSW_REG_MBCT_FSM_STATE_ERROR)
+ return 0;
+ return __mlxsw_linecard_fix_fsm_state(linecard);
+}
+
+static int
+mlxsw_linecard_query_ini_status(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_status *status,
+ enum mlxsw_reg_mbct_fsm_state *fsm_state,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS, false);
+ err = mlxsw_reg_query(linecard->linecards->mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query linecard INI status");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, status, fsm_state);
+ return err;
+}
+
+static int
+mlxsw_linecard_ini_transfer(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ const struct mlxsw_linecard_ini_file *ini_file,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ size_t size_left;
+ const u8 *data;
+ int err;
+
+ size_left = le16_to_cpu(ini_file->size);
+ data = ini_file->data;
+ while (size_left) {
+ size_t data_size = MLXSW_REG_MBCT_DATA_LEN;
+ bool is_last = false;
+
+ if (size_left <= MLXSW_REG_MBCT_DATA_LEN) {
+ data_size = size_left;
+ is_last = true;
+ }
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
+ mlxsw_reg_mbct_dt_pack(linecard->mbct_pl, data_size,
+ is_last, data);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI data transfer");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL,
+ &status, &fsm_state);
+ if ((!is_last && status != MLXSW_REG_MBCT_STATUS_PART_DATA) ||
+ (is_last && status != MLXSW_REG_MBCT_STATUS_LAST_DATA)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer linecard INI data");
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+ }
+ size_left -= data_size;
+ data += data_size;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_linecard_ini_erase(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI erase");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ switch (status) {
+ case MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE:
+ break;
+ default:
+ /* Should not happen */
+ fallthrough;
+ case MLXSW_REG_MBCT_STATUS_ERASE_FAILED:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI");
+ goto fix_fsm_err_out;
+ case MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI while being used");
+ goto fix_fsm_err_out;
+ }
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+static void mlxsw_linecard_bct_process(struct mlxsw_core *mlxsw_core,
+ const char *mbct_pl)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ struct mlxsw_linecard *linecard;
+ u8 slot_index;
+
+ mlxsw_reg_mbct_unpack(mbct_pl, &slot_index, &status, &fsm_state);
+ if (WARN_ON(slot_index > linecards->count))
+ return;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mutex_lock(&linecard->lock);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ dev_err(linecards->bus_info->dev, "linecard %u: Failed to activate INI",
+ linecard->slot_index);
+ goto fix_fsm_out;
+ }
+ mutex_unlock(&linecard->lock);
+ return;
+
+fix_fsm_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_ini_activate(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ACTIVATE, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI activation");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate linecard INI");
+ goto fix_fsm_err_out;
+ }
+
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+#define MLXSW_LINECARD_INI_WAIT_RETRIES 10
+#define MLXSW_LINECARD_INI_WAIT_MS 500
+
+static int
+mlxsw_linecard_ini_in_use_wait(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ unsigned int ini_wait_retries = 0;
+ int err;
+
+query_ini_status:
+ err = mlxsw_linecard_query_ini_status(linecard, &status,
+ &fsm_state, extack);
+ if (err)
+ return err;
+
+ switch (fsm_state) {
+ case MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE:
+ if (ini_wait_retries++ > MLXSW_LINECARD_INI_WAIT_RETRIES) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to wait for linecard INI to be unused");
+ return -EINVAL;
+ }
+ mdelay(MLXSW_LINECARD_INI_WAIT_MS);
+ goto query_ini_status;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool mlxsw_linecard_port_selector(void *priv, u16 local_port)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+ return linecard == mlxsw_core_port_linecard_get(mlxsw_core, local_port);
+}
+
+static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ err = mlxsw_linecard_ini_transfer(mlxsw_core, linecard,
+ ini_file, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ err = mlxsw_linecard_ini_activate(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
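/* A reader's summary of the provisioning flow above, using only the traps
 * and helpers defined elsewhere in this file: mlxsw_linecard_provision()
 * erases any previously loaded INI, streams the selected INI in
 * MLXSW_REG_MBCT_DATA_LEN chunks, arms the 10 s
 * MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION timeout and issues
 * MLXSW_REG_MBCT_OP_ACTIVATE, then returns. The provisioned state itself
 * arrives asynchronously: a DSDSC status event is handled by
 * mlxsw_linecard_status_process(), which calls
 * mlxsw_linecard_provision_set(); an activation failure is reported via the
 * BCTOE trap and ends in mlxsw_linecard_provision_fail(), as does expiry of
 * the timeout in mlxsw_linecard_status_event_to_work().
 */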
+
+static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard,
+ void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ mlxsw_core_ports_remove_selected(mlxsw_core,
+ mlxsw_linecard_port_selector,
+ linecard);
+
+ err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static bool mlxsw_linecard_same_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ bool ret;
+
+ mutex_lock(&linecard->lock);
+ ret = linecard->hw_revision == be16_to_cpu(ini_file->format.hw_revision) &&
+ linecard->ini_version == be16_to_cpu(ini_file->format.ini_version);
+ mutex_unlock(&linecard->lock);
+ return ret;
+}
+
+static unsigned int
+mlxsw_linecard_types_count(struct devlink_linecard *devlink_linecard,
+ void *priv)
+{
+ struct mlxsw_linecard *linecard = priv;
+
+ return linecard->linecards->types_info ?
+ linecard->linecards->types_info->count : 0;
+}
+
+static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard,
+ void *priv, unsigned int index,
+ const char **type, const void **type_priv)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ struct mlxsw_linecard *linecard = priv;
+
+ types_info = linecard->linecards->types_info;
+ if (WARN_ON_ONCE(!types_info))
+ return;
+ ini_file = types_info->ini_files[index];
+ *type = ini_file->format.name;
+ *type_priv = ini_file;
+}
+
+static int
+mlxsw_linecard_info_get(struct devlink_linecard *devlink_linecard, void *priv,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->provisioned) {
+ err = 0;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static const struct devlink_linecard_ops mlxsw_linecard_ops = {
+ .provision = mlxsw_linecard_provision,
+ .unprovision = mlxsw_linecard_unprovision,
+ .same_provision = mlxsw_linecard_same_provision,
+ .types_count = mlxsw_linecard_types_count,
+ .types_get = mlxsw_linecard_types_get,
+ .info_get = mlxsw_linecard_info_get,
+ .device_info_get = mlxsw_linecard_device_info_get,
+};
+
+struct mlxsw_linecard_status_event {
+ struct mlxsw_core *mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_status_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_linecards *linecards;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_status_event, work);
+ mlxsw_core = event->mlxsw_core;
+ linecards = mlxsw_core_linecards(mlxsw_core);
+ mlxsw_linecard_status_process(linecards, NULL, event->mddq_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_status_listener_func(const struct mlxsw_reg_info *reg,
+ char *mddq_pl, void *priv)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mddq_pl, mddq_pl, sizeof(event->mddq_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_status_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+struct mlxsw_linecard_bct_event {
+ struct mlxsw_core *mlxsw_core;
+ char mbct_pl[MLXSW_REG_MBCT_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_bct_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_bct_event, work);
+ mlxsw_core = event->mlxsw_core;
+ mlxsw_linecard_bct_process(mlxsw_core, event->mbct_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_bct_listener_func(const struct mlxsw_reg_info *reg,
+ char *mbct_pl, void *priv)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mbct_pl, mbct_pl, sizeof(event->mbct_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_bct_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_linecard_listener[] = {
+ MLXSW_CORE_EVENTL(mlxsw_linecard_status_listener_func, DSDSC),
+ MLXSW_CORE_EVENTL(mlxsw_linecard_bct_listener_func, BCTOE),
+};
+
+static int mlxsw_linecard_event_delivery_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ bool enable)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+}
+
+static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct devlink_linecard *devlink_linecard;
+ struct mlxsw_linecard *linecard;
+ int err;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ linecard->slot_index = slot_index;
+ linecard->linecards = linecards;
+ mutex_init(&linecard->lock);
+ INIT_LIST_HEAD(&linecard->device_list);
+
+ devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
+ slot_index, &mlxsw_linecard_ops,
+ linecard);
+ if (IS_ERR(devlink_linecard)) {
+ err = PTR_ERR(devlink_linecard);
+ goto err_devlink_linecard_create;
+ }
+ linecard->devlink_linecard = devlink_linecard;
+ INIT_DELAYED_WORK(&linecard->status_event_to_dw,
+ &mlxsw_linecard_status_event_to_work);
+
+ err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
+ if (err)
+ goto err_event_delivery_set;
+
+ err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ if (err)
+ goto err_status_get_and_process;
+
+ return 0;
+
+err_status_get_and_process:
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+err_event_delivery_set:
+ devlink_linecard_destroy(linecard->devlink_linecard);
+err_devlink_linecard_create:
+ mutex_destroy(&linecard->lock);
+ return err;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ /* Make sure all scheduled events are processed */
+ mlxsw_core_flush_owq();
+ if (linecard->active)
+ mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_destroy(linecard->devlink_linecard);
+ mutex_destroy(&linecard->lock);
+}
+
+/* LINECARDS INI BUNDLE FILE
+ * +----------------------------------+
+ * | MAGIC ("NVLCINI+") |
+ * +----------------------------------+ +--------------------+
+ * | INI 0 +---> | __le16 size |
+ * +----------------------------------+ | __be16 hw_revision |
+ * | INI 1 | | __be16 ini_version |
+ * +----------------------------------+ | u8 __dontcare[3] |
+ * | ... | | u8 type |
+ * +----------------------------------+ | u8 name[20] |
+ * | INI N | | ... |
+ * +----------------------------------+ +--------------------+
+ */
+
+#define MLXSW_LINECARDS_INI_BUNDLE_MAGIC "NVLCINI+"
+
+static int
+mlxsw_linecard_types_file_validate(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ struct mlxsw_linecard_ini_file *ini_file;
+ size_t size = types_info->data_size;
+ const u8 *data = types_info->data;
+ unsigned int count = 0;
+ u16 ini_file_size;
+
+ if (size < magic_size) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file size, smaller than magic size\n");
+ return -EINVAL;
+ }
+ if (memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size)) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file magic pattern\n");
+ return -EINVAL;
+ }
+
+ data += magic_size;
+ size -= magic_size;
+
+ while (size > 0) {
+ if (size < sizeof(*ini_file)) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI which is smaller than bare minimum\n");
+ return -EINVAL;
+ }
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ if (ini_file_size + sizeof(__le16) > size) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file appears to be truncated\n");
+ return -EINVAL;
+ }
+ if (ini_file_size % 4) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI with invalid size\n");
+ return -EINVAL;
+ }
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+ if (!count) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file does not contain any INI\n");
+ return -EINVAL;
+ }
+ types_info->count = count;
+ return 0;
+}
+
+static void
+mlxsw_linecard_types_file_parse(struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ size_t size = types_info->data_size - magic_size;
+ const u8 *data = types_info->data + magic_size;
+ struct mlxsw_linecard_ini_file *ini_file;
+ unsigned int count = 0;
+ u16 ini_file_size;
+ int i;
+
+ while (size) {
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ for (i = 0; i < ini_file_size / 4; i++) {
+ u32 *val = &((u32 *) ini_file->data)[i];
+
+ *val = swab32(*val);
+ }
+ types_info->ini_files[count] = ini_file;
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+}
+
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT \
+ "mellanox/lc_ini_bundle_%u_%u.bin"
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN \
+ (sizeof(MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT) + 4)
+
+static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ const struct mlxsw_fw_rev *rev = &linecards->bus_info->fw_rev;
+ char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN];
+ struct mlxsw_linecard_types_info *types_info;
+ const struct firmware *firmware;
+ int err;
+
+ err = snprintf(filename, sizeof(filename),
+ MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT,
+ rev->minor, rev->subminor);
+ WARN_ON(err >= sizeof(filename));
+
+ err = request_firmware_direct(&firmware, filename,
+ linecards->bus_info->dev);
+ if (err) {
+ dev_warn(linecards->bus_info->dev, "Could not request linecards INI file \"%s\", provisioning will not be possible\n",
+ filename);
+ return 0;
+ }
+
+ types_info = kzalloc(sizeof(*types_info), GFP_KERNEL);
+ if (!types_info) {
+ release_firmware(firmware);
+ return -ENOMEM;
+ }
+ linecards->types_info = types_info;
+
+ types_info->data_size = firmware->size;
+ types_info->data = vmalloc(types_info->data_size);
+ if (!types_info->data) {
+ err = -ENOMEM;
+ release_firmware(firmware);
+ goto err_data_alloc;
+ }
+ memcpy(types_info->data, firmware->data, types_info->data_size);
+ release_firmware(firmware);
+
+ err = mlxsw_linecard_types_file_validate(linecards, types_info);
+ if (err) {
+ err = 0;
+ goto err_type_file_file_validate;
+ }
+
+ types_info->ini_files = kmalloc_array(types_info->count,
+ sizeof(struct mlxsw_linecard_ini_file *),
+ GFP_KERNEL);
+ if (!types_info->ini_files) {
+ err = -ENOMEM;
+ goto err_ini_files_alloc;
+ }
+
+ mlxsw_linecard_types_file_parse(types_info);
+
+ return 0;
+
+err_ini_files_alloc:
+err_type_file_file_validate:
+ vfree(types_info->data);
+err_data_alloc:
+ kfree(types_info);
+ return err;
+}
+
+static void mlxsw_linecard_types_fini(struct mlxsw_linecards *linecards)
+{
+ struct mlxsw_linecard_types_info *types_info = linecards->types_info;
+
+ if (!types_info)
+ return;
+ kfree(types_info->ini_files);
+ vfree(types_info->data);
+ kfree(types_info);
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_linecards *linecards;
+ u8 slot_count;
+ int err;
+ int i;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ NULL, &slot_count);
+ if (!slot_count)
+ return 0;
+
+ linecards = vzalloc(struct_size(linecards, linecards, slot_count));
+ if (!linecards)
+ return -ENOMEM;
+ linecards->count = slot_count;
+ linecards->mlxsw_core = mlxsw_core;
+ linecards->bus_info = bus_info;
+ INIT_LIST_HEAD(&linecards->event_ops_list);
+ mutex_init(&linecards->event_ops_list_lock);
+
+ err = mlxsw_linecard_types_init(mlxsw_core, linecards);
+ if (err)
+ goto err_types_init;
+
+ err = mlxsw_core_traps_register(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ if (err)
+ goto err_traps_register;
+
+ mlxsw_core_linecards_set(mlxsw_core, linecards);
+
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_init(mlxsw_core, linecards, i + 1);
+ if (err)
+ goto err_linecard_init;
+ }
+
+ return 0;
+
+err_linecard_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+err_traps_register:
+ mlxsw_linecard_types_fini(linecards);
+err_types_init:
+ vfree(linecards);
+ return err;
+}
+
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ if (!linecards)
+ return;
+ for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ mlxsw_linecard_types_fini(linecards);
+ mutex_destroy(&linecards->event_ops_list_lock);
+ WARN_ON(!list_empty(&linecards->event_ops_list));
+ vfree(linecards);
+}
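core_linecards.c above both documents and parses the "NVLCINI+" bundle layout, so a compact restatement of the record walk may help when reading mlxsw_linecard_types_file_validate() and mlxsw_linecard_types_file_parse(): each record is a little-endian 16-bit payload size followed by that many bytes (a multiple of four), and the payload words are byte-swapped in place before use. The helper below is a simplified, hypothetical sketch of that walk, assuming the struct mlxsw_linecard_ini_file and MLXSW_LINECARDS_INI_BUNDLE_MAGIC definitions from the file above; it is not a drop-in replacement for the validation done there:

/* Count the INI records in a "NVLCINI+" bundle; stops at the first record
 * that is truncated or not a multiple of four bytes. Simplified from the
 * checks in mlxsw_linecard_types_file_validate().
 */
static unsigned int example_count_ini_records(const u8 *data, size_t size)
{
	size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
	unsigned int count = 0;

	if (size < magic_size ||
	    memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size))
		return 0;
	data += magic_size;
	size -= magic_size;

	while (size >= sizeof(struct mlxsw_linecard_ini_file)) {
		const struct mlxsw_linecard_ini_file *ini_file;
		u16 ini_file_size;

		ini_file = (const struct mlxsw_linecard_ini_file *)data;
		ini_file_size = le16_to_cpu(ini_file->size);
		/* The size field excludes itself; payloads are 4-byte padded. */
		if (ini_file_size % 4 ||
		    ini_file_size + sizeof(__le16) > size)
			break;
		data += ini_file_size + sizeof(__le16);
		size -= ini_file_size + sizeof(__le16);
		count++;
	}
	return count;
}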
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 05f54bd982c0..3548fe1df7c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -21,7 +21,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
-#define MLXSW_THERMAL_ZONE_MAX_NAME 16
#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MIN_STATE 2
@@ -82,6 +81,16 @@ struct mlxsw_thermal_module {
struct thermal_zone_device *tzdev;
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
int module; /* Module or gearbox number */
+ u8 slot_index;
+};
+
+struct mlxsw_thermal_area {
+ struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
+ struct mlxsw_thermal_module *tz_gearbox_arr;
+ u8 tz_gearbox_num;
+ u8 slot_index;
+ bool active;
};
struct mlxsw_thermal {
@@ -92,12 +101,9 @@ struct mlxsw_thermal {
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
- struct mlxsw_thermal_module *tz_module_arr;
- u8 tz_module_num;
- struct mlxsw_thermal_module *tz_gearbox_arr;
- u8 tz_gearbox_num;
unsigned int tz_highest_score;
struct thermal_zone_device *tz_highest_dev;
+ struct mlxsw_thermal_area line_cards[];
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -123,8 +129,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
/* Allow mlxsw thermal zone binding to an external cooling device */
for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
- if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
- strlen(cdev->type)))
+ if (!strcmp(cdev->type, mlxsw_thermal_external_allowed_cdev[i]))
return 0;
}
@@ -150,13 +155,15 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
* EEPROM if we got valid thresholds from MTMP.
*/
if (!emerg_temp || !crit_temp) {
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_WARN,
&crit_temp);
if (err)
return err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_ALARM,
&emerg_temp);
if (err)
@@ -271,7 +278,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
int temp;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, 0, 0, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -423,15 +430,16 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
static void
mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core,
- u16 sensor_index, int *p_temp,
- int *p_crit_temp,
+ u8 slot_index, u16 sensor_index,
+ int *p_temp, int *p_crit_temp,
int *p_emerg_temp)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int err;
/* Read module temperature and thresholds. */
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index,
+ false, false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
/* Set temperature and thresholds to zero to avoid passing
@@ -462,6 +470,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
/* Read module temperature and thresholds. */
mlxsw_thermal_module_temp_and_thresholds_get(thermal->core,
+ tz->slot_index,
sensor_index, &temp,
&crit_temp, &emerg_temp);
*p_temp = temp;
@@ -576,7 +585,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
int err;
index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module;
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, tz->slot_index, index, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -672,11 +681,15 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int err;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
- module_tz->module + 1);
+ if (module_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-module%d",
+ module_tz->slot_index, module_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ module_tz->module + 1);
module_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
@@ -704,25 +717,28 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
static int
mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal, u8 module)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
int dummy_temp, crit_temp, emerg_temp;
u16 sensor_index;
sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module;
- module_tz = &thermal->tz_module_arr[module];
+ module_tz = &area->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
return 0;
module_tz->module = module;
+ module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
memcpy(module_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
/* Initialize all trip point. */
mlxsw_thermal_module_trips_reset(module_tz);
/* Read module temperature and thresholds. */
- mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp,
+ mlxsw_thermal_module_temp_and_thresholds_get(core, area->slot_index,
+ sensor_index, &dummy_temp,
&crit_temp, &emerg_temp);
/* Update trip point according to the module data. */
return mlxsw_thermal_module_trips_update(dev, core, module_tz,
@@ -740,34 +756,39 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
static int
mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &thermal->tz_module_num);
+ &area->tz_module_num, NULL);
+
+ /* For a modular system, the module count can be zero. */
+ if (!area->tz_module_num)
+ return 0;
- thermal->tz_module_arr = kcalloc(thermal->tz_module_num,
- sizeof(*thermal->tz_module_arr),
- GFP_KERNEL);
- if (!thermal->tz_module_arr)
+ area->tz_module_arr = kcalloc(area->tz_module_num,
+ sizeof(*area->tz_module_arr),
+ GFP_KERNEL);
+ if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_module_num; i++) {
- err = mlxsw_thermal_module_init(dev, core, thermal, i);
+ for (i = 0; i < area->tz_module_num; i++) {
+ err = mlxsw_thermal_module_init(dev, core, thermal, area, i);
if (err)
goto err_thermal_module_init;
}
- for (i = 0; i < thermal->tz_module_num; i++) {
- module_tz = &thermal->tz_module_arr[i];
+ for (i = 0; i < area->tz_module_num; i++) {
+ module_tz = &area->tz_module_arr[i];
if (!module_tz->parent)
continue;
err = mlxsw_thermal_module_tz_init(module_tz);
@@ -779,30 +800,35 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
err_thermal_module_tz_init:
err_thermal_module_init:
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
return err;
}
static void
-mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
}
static int
mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int ret;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
- gearbox_tz->module + 1);
+ if (gearbox_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-gearbox%d",
+ gearbox_tz->slot_index, gearbox_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ gearbox_tz->module + 1);
gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
@@ -828,7 +854,8 @@ mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz)
static int
mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
enum mlxsw_reg_mgpir_device_type device_type;
struct mlxsw_thermal_module *gearbox_tz;
@@ -837,30 +864,31 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
int i;
int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
- NULL);
+ NULL, NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- thermal->tz_gearbox_num = gbox_num;
- thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
- sizeof(*thermal->tz_gearbox_arr),
- GFP_KERNEL);
- if (!thermal->tz_gearbox_arr)
+ area->tz_gearbox_num = gbox_num;
+ area->tz_gearbox_arr = kcalloc(area->tz_gearbox_num,
+ sizeof(*area->tz_gearbox_arr),
+ GFP_KERNEL);
+ if (!area->tz_gearbox_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_gearbox_num; i++) {
- gearbox_tz = &thermal->tz_gearbox_arr[i];
+ for (i = 0; i < area->tz_gearbox_num; i++) {
+ gearbox_tz = &area->tz_gearbox_arr[i];
memcpy(gearbox_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
+ gearbox_tz->slot_index = area->slot_index;
err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
if (err)
goto err_thermal_gearbox_tz_init;
@@ -870,21 +898,80 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
err_thermal_gearbox_tz_init:
for (i--; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
return err;
}
static void
-mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- for (i = thermal->tz_gearbox_num - 1; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ for (i = area->tz_gearbox_num - 1; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
+}
+
+static void
+mlxsw_thermal_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+ int err;
+
+ linecard = &thermal->line_cards[slot_index];
+
+ if (linecard->active)
+ return;
+
+ linecard->slot_index = slot_index;
+ err = mlxsw_thermal_modules_init(thermal->bus_info->dev, thermal->core,
+ thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_thermal_gearboxes_init(thermal->bus_info->dev,
+ thermal->core, thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card gearboxes in slot %d\n",
+ slot_index);
+ goto err_thermal_linecard_gearboxes_init;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_thermal_linecard_gearboxes_init:
+ mlxsw_thermal_modules_fini(thermal, linecard);
+}
+
+static void
+mlxsw_thermal_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+
+ linecard = &thermal->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ mlxsw_thermal_gearboxes_fini(thermal, linecard);
+ mlxsw_thermal_modules_fini(thermal, linecard);
}
+static struct mlxsw_linecards_event_ops mlxsw_thermal_event_ops = {
+ .got_active = mlxsw_thermal_got_active,
+ .got_inactive = mlxsw_thermal_got_inactive,
+};
+
int mlxsw_thermal_init(struct mlxsw_core *core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_thermal **p_thermal)
@@ -892,19 +979,29 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 };
enum mlxsw_reg_mfcr_pwm_frequency freq;
struct device *dev = bus_info->dev;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_thermal *thermal;
+ u8 pwm_active, num_of_slots;
u16 tacho_active;
- u8 pwm_active;
int err, i;
- thermal = devm_kzalloc(dev, sizeof(*thermal),
- GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
+ thermal = kzalloc(struct_size(thermal, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!thermal)
return -ENOMEM;
thermal->core = core;
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+ thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
if (err) {
@@ -970,25 +1067,38 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
goto err_thermal_zone_device_register;
}
- err = mlxsw_thermal_modules_init(dev, core, thermal);
+ err = mlxsw_thermal_modules_init(dev, core, thermal,
+ &thermal->line_cards[0]);
if (err)
goto err_thermal_modules_init;
- err = mlxsw_thermal_gearboxes_init(dev, core, thermal);
+ err = mlxsw_thermal_gearboxes_init(dev, core, thermal,
+ &thermal->line_cards[0]);
if (err)
goto err_thermal_gearboxes_init;
+ err = mlxsw_linecards_event_ops_register(core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ if (err)
+ goto err_linecards_event_ops_register;
+
err = thermal_zone_device_enable(thermal->tzdev);
if (err)
goto err_thermal_zone_device_enable;
+ thermal->line_cards[0].active = true;
*p_thermal = thermal;
return 0;
err_thermal_zone_device_enable:
- mlxsw_thermal_gearboxes_fini(thermal);
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+err_linecards_event_ops_register:
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
err_thermal_gearboxes_init:
- mlxsw_thermal_modules_fini(thermal);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
err_thermal_modules_init:
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
@@ -1001,7 +1111,7 @@ err_thermal_cooling_device_register:
thermal_cooling_device_unregister(thermal->cdevs[i]);
err_reg_write:
err_reg_query:
- devm_kfree(dev, thermal);
+ kfree(thermal);
return err;
}
@@ -1009,8 +1119,12 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
{
int i;
- mlxsw_thermal_gearboxes_fini(thermal);
- mlxsw_thermal_modules_fini(thermal);
+ thermal->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
@@ -1023,5 +1137,5 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
}
}
- devm_kfree(thermal->bus_info->dev, thermal);
+ kfree(thermal);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 3bc012dafd08..d9660d4cce96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -59,7 +59,8 @@ static int mlxsw_m_port_open(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+ return mlxsw_env_module_port_up(mlxsw_m->core, 0,
+ mlxsw_m_port->module);
}
static int mlxsw_m_port_stop(struct net_device *dev)
@@ -67,7 +68,7 @@ static int mlxsw_m_port_stop(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
+ mlxsw_env_module_port_down(mlxsw_m->core, 0, mlxsw_m_port->module);
return 0;
}
@@ -110,7 +111,7 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(netdev, core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_info(netdev, core, 0, mlxsw_m_port->module,
modinfo);
}
@@ -121,8 +122,8 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module,
- ee, data);
+ return mlxsw_env_get_module_eeprom(netdev, core, 0,
+ mlxsw_m_port->module, ee, data);
}
static int
@@ -133,7 +134,8 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_eeprom_by_page(core, 0,
+ mlxsw_m_port->module,
page, extack);
}
@@ -142,7 +144,7 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+ return mlxsw_env_reset_module(netdev, core, 0, mlxsw_m_port->module,
flags);
}
@@ -154,7 +156,7 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_power_mode(core, 0, mlxsw_m_port->module,
params, extack);
}
@@ -166,7 +168,7 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_set_module_power_mode(core, 0, mlxsw_m_port->module,
params->policy, extack);
}
@@ -221,7 +223,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port, 0,
module + 1, false, 0, false,
0, mlxsw_m->base_mac,
sizeof(mlxsw_m->base_mac));
@@ -311,7 +313,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
- mlxsw_env_module_port_map(mlxsw_m->core, module);
+ mlxsw_env_module_port_map(mlxsw_m->core, 0, module);
mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
return 0;
@@ -320,12 +322,13 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
{
mlxsw_m->module_to_port[module] = -1;
- mlxsw_env_module_port_unmap(mlxsw_m->core, module);
+ mlxsw_env_module_port_unmap(mlxsw_m->core, 0, module);
}
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
u8 last_module = max_ports;
int i;
int err;
@@ -354,6 +357,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
}
/* Create port objects for each valid entry */
+ devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0 &&
!mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
@@ -364,6 +368,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
goto err_module_to_port_create;
}
}
+ devl_unlock(devlink);
return 0;
@@ -373,6 +378,7 @@ err_module_to_port_create:
mlxsw_m_port_remove(mlxsw_m,
mlxsw_m->module_to_port[i]);
}
+ devl_unlock(devlink);
i = max_ports;
err_module_to_port_map:
for (i--; i > 0; i--)
@@ -385,8 +391,10 @@ err_module_to_port_alloc:
static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
int i;
+ devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
mlxsw_m_port_remove(mlxsw_m,
@@ -394,6 +402,7 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
mlxsw_m_port_module_unmap(mlxsw_m, i);
}
}
+ devl_unlock(devlink);
kfree(mlxsw_m->module_to_port);
kfree(mlxsw_m->ports);
@@ -422,7 +431,6 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_m->core = mlxsw_core;
@@ -438,9 +446,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
- devl_lock(devlink);
err = mlxsw_m_ports_create(mlxsw_m);
- devl_unlock(devlink);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
return err;
@@ -452,11 +458,8 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devl_lock(devlink);
mlxsw_m_ports_remove(mlxsw_m);
- devl_unlock(devlink);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 67b1a2f8397f..078e3aa04383 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4325,6 +4325,15 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
*/
MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
+/* reg_pmlp_slot_index
+ * Slot index.
+ * Slot_index = 0 represents the onboard (motherboard).
+ * In a non-modular system, only slot_index = 0 is available.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, slot_index, 0x04, 8, 4, 0x04, 0x00, false);
+
/* reg_pmlp_tx_lane
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
@@ -5769,9 +5778,10 @@ enum mlxsw_reg_pmaos_e {
*/
MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmaos, payload);
+ mlxsw_reg_pmaos_slot_index_set(payload, slot_index);
mlxsw_reg_pmaos_module_set(payload, module);
}
@@ -5874,6 +5884,69 @@ static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
}
+/* PMECR - Ports Mapping Event Configuration Register
+ * --------------------------------------------------
+ * The PMECR register is used to enable/disable event triggering
+ * in case of local port mapping change.
+ */
+#define MLXSW_REG_PMECR_ID 0x501B
+#define MLXSW_REG_PMECR_LEN 0x20
+
+MLXSW_REG_DEFINE(pmecr, MLXSW_REG_PMECR_ID, MLXSW_REG_PMECR_LEN);
+
+/* reg_pmecr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pmecr, 0x00, 16, 0x00, 12);
+
+/* reg_pmecr_ee
+ * Event update enable. If this bit is set, event generation will be updated
+ * based on the e field. Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, ee, 0x04, 30, 1);
+
+/* reg_pmecr_eswi
+ * Software ignore enable bit. If this bit is set, the value of swi is used.
+ * If this bit is clear, the value of swi is ignored.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, eswi, 0x04, 24, 1);
+
+/* reg_pmecr_swi
+ * Software ignore. If this bit is set, the device should not generate
+ * events for a PMLP SET operation, but only upon an actual local port
+ * mapping change (if applicable according to the e configuration). This is
+ * supplementary configuration on top of the e value.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, swi, 0x04, 8, 1);
+
+enum mlxsw_reg_pmecr_e {
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmecr_e
+ * Event generation on local port mapping change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmecr_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_pmecr_e e)
+{
+ MLXSW_REG_ZERO(pmecr, payload);
+ mlxsw_reg_pmecr_local_port_set(payload, local_port);
+ mlxsw_reg_pmecr_e_set(payload, e);
+ mlxsw_reg_pmecr_ee_set(payload, true);
+ mlxsw_reg_pmecr_swi_set(payload, true);
+ mlxsw_reg_pmecr_eswi_set(payload, true);
+}
+
/* PMPE - Port Module Plug/Unplug Event Register
* ---------------------------------------------
* This register reports any operational status change of a module.
@@ -5984,6 +6057,12 @@ MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
*/
MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
+/* reg_pmmp_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmmp, slot_index, 0x00, 24, 4);
+
/* reg_pmmp_sticky
* When set, will keep eeprom_override values after plug-out event.
* Access: OP
@@ -6011,9 +6090,10 @@ enum {
*/
MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
-static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_slot_index_set(payload, slot_index);
mlxsw_reg_pmmp_module_set(payload, module);
}
@@ -9721,6 +9801,12 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
+/* reg_mtmp_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtmp, slot_index, 0x00, 16, 4);
+
#define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64
#define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256
/* reg_mtmp_sensor_index
@@ -9810,11 +9896,12 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
*/
MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
-static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
- bool max_temp_enable,
+static inline void mlxsw_reg_mtmp_pack(char *payload, u8 slot_index,
+ u16 sensor_index, bool max_temp_enable,
bool max_temp_reset)
{
MLXSW_REG_ZERO(mtmp, payload);
+ mlxsw_reg_mtmp_slot_index_set(payload, slot_index);
mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
@@ -9880,6 +9967,12 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
+/* reg_mtbr_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtbr, slot_index, 0x00, 16, 4);
+
/* reg_mtbr_base_sensor_index
* Base sensors index to access (0 - ASIC sensor, 1-63 - ambient sensors,
* 64-127 are mapped to the SFP+/QSFP modules sequentially).
@@ -9912,10 +10005,11 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index,
- u8 num_rec)
+static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index,
+ u16 base_sensor_index, u8 num_rec)
{
MLXSW_REG_ZERO(mtbr, payload);
+ mlxsw_reg_mtbr_slot_index_set(payload, slot_index);
mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
}
@@ -9964,6 +10058,12 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
*/
MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+/* reg_mcia_slot_index
+ * Slot index (0: Main board)
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, slot, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCIA_STATUS_GOOD = 0,
/* No response from module's EEPROM. */
@@ -10063,11 +10163,13 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
-static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
- u8 page_number, u16 device_addr,
- u8 size, u8 i2c_device_addr)
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
+ u8 lock, u8 page_number,
+ u16 device_addr, u8 size,
+ u8 i2c_device_addr)
{
MLXSW_REG_ZERO(mcia, payload);
+ mlxsw_reg_mcia_slot_set(payload, slot_index);
mlxsw_reg_mcia_module_set(payload, module);
mlxsw_reg_mcia_l_set(payload, lock);
mlxsw_reg_mcia_page_number_set(payload, page_number);
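
/* Editorial sketch, not part of the patch: reading the lower page of a
 * module EEPROM through MCIA with the new slot_index argument. It assumes
 * the usual mlxsw core/reg headers, a valid "core" pointer, a destination
 * buffer of at least MLXSW_REG_MCIA_EEPROM_SIZE bytes and the conventional
 * 0x50 I2C address for the lower page; status checking is omitted.
 */
static int example_mcia_read_lower_page(struct mlxsw_core *core, u8 slot_index,
					u8 module, char *buf, u8 size)
{
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	int err;

	/* Page 0, offset 0, no lock, assumed I2C address 0x50. */
	mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, 0, 0, size, 0x50);
	err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;
	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, buf);
	return 0;
}
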
@@ -10499,6 +10601,12 @@ MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
*/
MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+/* reg_mcion_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, slot_index, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
@@ -10510,9 +10618,10 @@ enum {
*/
MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
-static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_slot_index_set(payload, slot_index);
mlxsw_reg_mcion_module_set(payload, module);
}
@@ -11326,6 +11435,12 @@ enum mlxsw_reg_mgpir_device_type {
MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
};
+/* mgpir_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpir, slot_index, 0x00, 28, 4);
+
/* mgpir_device_type
* Access: RO
*/
@@ -11343,21 +11458,35 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+/* mgpir_max_modules_per_slot
+ * Maximum number of modules that can be connected per slot.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, max_modules_per_slot, 0x04, 16, 8);
+
+/* mgpir_num_of_slots
+ * Number of slots in the system.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_slots, 0x04, 8, 8);
+
/* mgpir_num_of_modules
* Number of modules.
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
-static inline void mlxsw_reg_mgpir_pack(char *payload)
+static inline void mlxsw_reg_mgpir_pack(char *payload, u8 slot_index)
{
MLXSW_REG_ZERO(mgpir, payload);
+ mlxsw_reg_mgpir_slot_index_set(payload, slot_index);
}
static inline void
mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
enum mlxsw_reg_mgpir_device_type *device_type,
- u8 *devices_per_flash, u8 *num_of_modules)
+ u8 *devices_per_flash, u8 *num_of_modules,
+ u8 *num_of_slots)
{
if (num_of_devices)
*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -11368,6 +11497,393 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
mlxsw_reg_mgpir_devices_per_flash_get(payload);
if (num_of_modules)
*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
+ if (num_of_slots)
+ *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload);
+}
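
/* Editorial sketch, not part of the patch: querying MGPIR on the main board
 * (slot 0) for the number of line-card slots, mirroring what
 * mlxsw_thermal_init() above does. A valid "core" pointer is assumed.
 */
static int example_mgpir_num_of_slots(struct mlxsw_core *core, u8 *num_of_slots)
{
	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	int err;

	mlxsw_reg_mgpir_pack(mgpir_pl, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;
	/* Only the slot count is of interest here. */
	mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, num_of_slots);
	return 0;
}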
+
+/* MBCT - Management Binary Code Transfer Register
+ * -----------------------------------------------
+ * This register allows to transfer binary codes from the host to
+ * the management FW by transferring it by chunks of maximum 1KB.
+ */
+#define MLXSW_REG_MBCT_ID 0x9120
+#define MLXSW_REG_MBCT_LEN 0x420
+
+MLXSW_REG_DEFINE(mbct, MLXSW_REG_MBCT_ID, MLXSW_REG_MBCT_LEN);
+
+/* reg_mbct_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mbct, slot_index, 0x00, 0, 4);
+
+/* reg_mbct_data_size
+ * Actual data field size in bytes for the current data transfer.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, data_size, 0x04, 0, 11);
+
+enum mlxsw_reg_mbct_op {
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE = 1,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, /* Download */
+ MLXSW_REG_MBCT_OP_ACTIVATE,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS = 6,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS,
+};
+
+/* reg_mbct_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, op, 0x08, 28, 4);
+
+/* reg_mbct_last
+ * Indicates that the current data field is the last chunk of the INI.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, last, 0x08, 26, 1);
+
+/* reg_mbct_oee
+ * Opcode Event Enable. When set, a BCTOE event will be sent once the
+ * opcode has been executed and the fsm_state has changed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, oee, 0x08, 25, 1);
+
+enum mlxsw_reg_mbct_status {
+ /* Partial data transfer completed successfully and ready for next
+ * data transfer.
+ */
+ MLXSW_REG_MBCT_STATUS_PART_DATA = 2,
+ MLXSW_REG_MBCT_STATUS_LAST_DATA,
+ MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE,
+ /* Error - trying to erase the INI while it is being used. */
+ MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE,
+ /* Last data transfer completed, applying magic pattern. */
+ MLXSW_REG_MBCT_STATUS_ERASE_FAILED = 7,
+ MLXSW_REG_MBCT_STATUS_INI_ERROR,
+ MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED,
+ MLXSW_REG_MBCT_STATUS_ILLEGAL_OPERATION = 11,
+};
+
+/* reg_mbct_status
+ * Status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, status, 0x0C, 24, 5);
+
+enum mlxsw_reg_mbct_fsm_state {
+ MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE = 5,
+ MLXSW_REG_MBCT_FSM_STATE_ERROR,
+};
+
+/* reg_mbct_fsm_state
+ * FSM state.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, fsm_state, 0x0C, 16, 4);
+
+#define MLXSW_REG_MBCT_DATA_LEN 1024
+
+/* reg_mbct_data
+ * Up to 1KB of data.
+ * Access: WO
+ */
+MLXSW_ITEM_BUF(reg, mbct, data, 0x20, MLXSW_REG_MBCT_DATA_LEN);
+
+static inline void mlxsw_reg_mbct_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mbct_op op, bool oee)
+{
+ MLXSW_REG_ZERO(mbct, payload);
+ mlxsw_reg_mbct_slot_index_set(payload, slot_index);
+ mlxsw_reg_mbct_op_set(payload, op);
+ mlxsw_reg_mbct_oee_set(payload, oee);
+}
+
+static inline void mlxsw_reg_mbct_dt_pack(char *payload,
+ u16 data_size, bool last,
+ const char *data)
+{
+ if (WARN_ON(data_size > MLXSW_REG_MBCT_DATA_LEN))
+ return;
+ mlxsw_reg_mbct_data_size_set(payload, data_size);
+ mlxsw_reg_mbct_last_set(payload, last);
+ mlxsw_reg_mbct_data_memcpy_to(payload, data);
+}
+
+static inline void
+mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
+ enum mlxsw_reg_mbct_status *p_status,
+ enum mlxsw_reg_mbct_fsm_state *p_fsm_state)
+{
+ if (p_slot_index)
+ *p_slot_index = mlxsw_reg_mbct_slot_index_get(payload);
+ *p_status = mlxsw_reg_mbct_status_get(payload);
+ if (p_fsm_state)
+ *p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
+}
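
/* Editorial sketch, not part of the patch: pushing an INI image to the FW
 * in MLXSW_REG_MBCT_DATA_LEN-sized chunks, as the register description
 * above suggests. "core", "slot_index", "buf" and "size" are assumed to be
 * supplied by the caller, the buffer is assumed to be readable in whole
 * 1KB chunks, and the per-chunk status/FSM checks are omitted for brevity.
 */
static int example_mbct_transfer_ini(struct mlxsw_core *core, u8 slot_index,
				     const char *buf, size_t size)
{
	char mbct_pl[MLXSW_REG_MBCT_LEN];
	size_t off = 0;
	int err;

	while (off < size) {
		u16 chunk = min_t(size_t, size - off, MLXSW_REG_MBCT_DATA_LEN);
		bool last = off + chunk == size;

		mlxsw_reg_mbct_pack(mbct_pl, slot_index,
				    MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
		mlxsw_reg_mbct_dt_pack(mbct_pl, chunk, last, buf + off);
		err = mlxsw_reg_write(core, MLXSW_REG(mbct), mbct_pl);
		if (err)
			return err;
		off += chunk;
	}
	return 0;
}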
+
+/* MDDQ - Management DownStream Device Query Register
+ * --------------------------------------------------
+ * This register allows querying the downstream device properties. The
+ * desired information is selected by the query_type field and is delivered
+ * in 32-byte data blocks.
+ */
+#define MLXSW_REG_MDDQ_ID 0x9161
+#define MLXSW_REG_MDDQ_LEN 0x30
+
+MLXSW_REG_DEFINE(mddq, MLXSW_REG_MDDQ_ID, MLXSW_REG_MDDQ_LEN);
+
+/* reg_mddq_sie
+ * Slot info event enable.
+ * When set to '1', each change in the slot_info.provisioned / sr_valid /
+ * active / ready will generate a DSDSC event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
+
+enum mlxsw_reg_mddq_query_type {
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
+};
+
+/* reg_mddq_query_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
+
+/* reg_mddq_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+
+/* reg_mddq_response_msg_seq
+ * Response message sequence number. For a given request, the response
+ * carries the sequence number to be used in the following request. The
+ * last message in a sequence is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequence number.
+ * The first message is numbered 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contain the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
+/* reg_mddq_slot_info_provisioned
+ * If set, the INI file is applied and the card is provisioned.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_provisioned, 0x10, 31, 1);
+
+/* reg_mddq_slot_info_sr_valid
+ * If set, the Shift Register is valid (after provisioning) and data can be
+ * sent from the switch ASIC to the line-card CPLD over the Shift Register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_sr_valid, 0x10, 30, 1);
+
+enum mlxsw_reg_mddq_slot_info_ready {
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_NOT_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_ERROR,
+};
+
+/* reg_mddq_slot_info_lc_ready
+ * If set, the LC is powered on and matches the INI version, and a new FW
+ * version can be burnt if necessary.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_lc_ready, 0x10, 28, 2);
+
+/* reg_mddq_slot_info_active
+ * If set, the FW has completed the MDDC.device_enable command.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_active, 0x10, 27, 1);
+
+/* reg_mddq_slot_info_hw_revision
+ * Major user-configured version number of the current INI file.
+ * Valid only when active or ready is '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_hw_revision, 0x14, 16, 16);
+
+/* reg_mddq_slot_info_ini_file_version
+ * User-configured version number of the current INI file.
+ * Valid only when active or lc_ready is '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_ini_file_version, 0x14, 0, 16);
+
+/* reg_mddq_slot_info_card_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_card_type, 0x18, 0, 8);
+
+static inline void
+__mlxsw_reg_mddq_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mddq_query_type query_type)
+{
+ MLXSW_REG_ZERO(mddq, payload);
+ mlxsw_reg_mddq_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddq_query_type_set(payload, query_type);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_pack(char *payload, u8 slot_index, bool sie)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO);
+ mlxsw_reg_mddq_sie_set(payload, sie);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
+ bool *p_provisioned, bool *p_sr_valid,
+ enum mlxsw_reg_mddq_slot_info_ready *p_lc_ready,
+ bool *p_active, u16 *p_hw_revision,
+ u16 *p_ini_file_version,
+ u8 *p_card_type)
+{
+ *p_slot_index = mlxsw_reg_mddq_slot_index_get(payload);
+ *p_provisioned = mlxsw_reg_mddq_slot_info_provisioned_get(payload);
+ *p_sr_valid = mlxsw_reg_mddq_slot_info_sr_valid_get(payload);
+ *p_lc_ready = mlxsw_reg_mddq_slot_info_lc_ready_get(payload);
+ *p_active = mlxsw_reg_mddq_slot_info_active_get(payload);
+ *p_hw_revision = mlxsw_reg_mddq_slot_info_hw_revision_get(payload);
+ *p_ini_file_version = mlxsw_reg_mddq_slot_info_ini_file_version_get(payload);
+ *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
+}
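
/* Editorial sketch, not part of the patch: reading the provisioning and
 * activation state of a slot via an MDDQ slot_info query. A valid "core"
 * pointer is assumed and the outputs the caller does not need are simply
 * discarded.
 */
static int example_mddq_slot_state(struct mlxsw_core *core, u8 slot_index,
				   bool *provisioned, bool *active)
{
	enum mlxsw_reg_mddq_slot_info_ready lc_ready;
	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	u16 hw_revision, ini_version;
	u8 queried_slot, card_type;
	bool sr_valid;
	int err;

	mlxsw_reg_mddq_slot_info_pack(mddq_pl, slot_index, false);
	err = mlxsw_reg_query(core, MLXSW_REG(mddq), mddq_pl);
	if (err)
		return err;
	mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &queried_slot, provisioned,
					&sr_valid, &lc_ready, active,
					&hw_revision, &ini_version,
					&card_type);
	return 0;
}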
+
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ if (p_flash_owner)
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ if (p_fw_major)
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ if (p_fw_minor)
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ if (p_fw_sub_minor)
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
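
/* Editorial sketch, not part of the patch: walking the devices on a slot
 * with MDDQ device_info queries. As described above, each response carries
 * the sequence number to use for the next request and the last message is
 * numbered 0; iteration also stops when data_valid is not set. What is done
 * with each device is left as a placeholder comment.
 */
static int example_mddq_for_each_device(struct mlxsw_core *core, u8 slot_index)
{
	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	u8 msg_seq = 0;
	int err;

	do {
		u16 fw_major, fw_minor, fw_sub_minor;
		bool data_valid, flash_owner;
		u8 device_index;

		mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index, msg_seq);
		err = mlxsw_reg_query(core, MLXSW_REG(mddq), mddq_pl);
		if (err)
			return err;
		mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
						  &data_valid, &flash_owner,
						  &device_index, &fw_major,
						  &fw_minor, &fw_sub_minor);
		if (!data_valid)
			break;
		/* ... consume device_index and the FW version here ... */
	} while (msg_seq);
	return 0;
}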
+
+#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
+
+/* reg_mddq_slot_ascii_name
+ * Slot's ASCII name.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mddq, slot_ascii_name, 0x10,
+ MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN);
+
+static inline void
+mlxsw_reg_mddq_slot_name_pack(char *payload, u8 slot_index)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name)
+{
+ mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name);
+}
+
+/* MDDC - Management DownStream Device Control Register
+ * ----------------------------------------------------
+ * This register allows controlling downstream devices and line cards.
+ */
+#define MLXSW_REG_MDDC_ID 0x9163
+#define MLXSW_REG_MDDC_LEN 0x30
+
+MLXSW_REG_DEFINE(mddc, MLXSW_REG_MDDC_ID, MLXSW_REG_MDDC_LEN);
+
+/* reg_mddc_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddc, slot_index, 0x00, 0, 4);
+
+/* reg_mddc_rst
+ * Reset request.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddc, rst, 0x04, 29, 1);
+
+/* reg_mddc_device_enable
+ * When set, FW is the manager and allowed to program the downstream device.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddc, device_enable, 0x04, 28, 1);
+
+static inline void mlxsw_reg_mddc_pack(char *payload, u8 slot_index, bool rst,
+ bool device_enable)
+{
+ MLXSW_REG_ZERO(mddc, payload);
+ mlxsw_reg_mddc_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddc_rst_set(payload, rst);
+ mlxsw_reg_mddc_device_enable_set(payload, device_enable);
}
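
/* Editorial sketch, not part of the patch: handing control of the
 * downstream device on a slot to the FW by writing MDDC with device_enable
 * set and no reset request. A valid "core" pointer is assumed.
 */
static int example_mddc_device_enable(struct mlxsw_core *core, u8 slot_index)
{
	char mddc_pl[MLXSW_REG_MDDC_LEN];

	mlxsw_reg_mddc_pack(mddc_pl, slot_index, false, true);
	return mlxsw_reg_write(core, MLXSW_REG(mddc), mddc_pl);
}
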
/* MFDE - Monitoring FW Debug Register
@@ -12125,6 +12641,12 @@ static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn,
MLXSW_REG_DEFINE(sbpr, MLXSW_REG_SBPR_ID, MLXSW_REG_SBPR_LEN);
+/* reg_sbpr_desc
+ * When set, configures descriptor buffer.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, desc, 0x00, 31, 1);
+
/* shared direction enum for SBPR, SBCM, SBPM */
enum mlxsw_reg_sbxx_dir {
MLXSW_REG_SBXX_DIR_INGRESS,
@@ -12619,6 +13141,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pmaos),
MLXSW_REG(pplr),
MLXSW_REG(pmtdb),
+ MLXSW_REG(pmecr),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
MLXSW_REG(pmmp),
@@ -12688,6 +13211,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtptpt),
MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
+ MLXSW_REG(mbct),
+ MLXSW_REG(mddq),
+ MLXSW_REG(mddc),
MLXSW_REG(mfde),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 8eb05090ffec..ac6348e2ff1f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -89,6 +89,11 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
"." __stringify(MLXSW_SP_FWREV_MINOR) \
"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
+ "mellanox/lc_ini_bundle_" \
+ __stringify(MLXSW_SP_FWREV_MINOR) "_" \
+ __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
+
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
@@ -481,23 +486,22 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
}
static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- struct mlxsw_sp_port_mapping *port_mapping)
+mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, char *pmlp_pl,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
bool separate_rxtx;
+ u8 first_lane;
+ u8 slot_index;
u8 module;
u8 width;
- int err;
int i;
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
width = mlxsw_reg_pmlp_width_get(pmlp_pl);
separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+ first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
if (width && !is_power_of_2(width)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
@@ -511,6 +515,11 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
+ if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
+ local_port);
+ return -EINVAL;
+ }
if (separate_rxtx &&
mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
@@ -518,7 +527,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
- if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
local_port);
return -EINVAL;
@@ -526,6 +535,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
port_mapping->module = module;
+ port_mapping->slot_index = slot_index;
port_mapping->width = width;
port_mapping->module_width = width;
port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
@@ -533,17 +543,35 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ pmlp_pl, port_mapping);
+}
+
+static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i, err;
- mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
+ port_mapping->slot_index);
mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
@@ -554,19 +582,20 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return 0;
err_pmlp_write:
- mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- u8 module)
+ u8 slot_index, u8 module)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}
static int mlxsw_sp_port_open(struct net_device *dev)
@@ -576,6 +605,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
int err;
err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
if (err)
return err;
@@ -587,6 +617,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
err_port_admin_status_set:
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return err;
}
@@ -599,6 +630,7 @@ static int mlxsw_sp_port_stop(struct net_device *dev)
netif_stop_queue(dev);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return 0;
}
@@ -1445,12 +1477,13 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
u64 overheat_counter;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
- &overheat_counter);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
+ module, &overheat_counter);
if (err)
return err;
@@ -1525,7 +1558,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
splittable = lanes > 1 && !split;
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes, mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
@@ -1775,13 +1808,16 @@ err_port_label_info_get:
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
+ port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
@@ -1804,7 +1840,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1858,21 +1894,148 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
return mlxsw_sp->ports[local_port] != NULL;
}
+static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, bool enable)
+{
+ char pmecr_pl[MLXSW_REG_PMECR_LEN];
+
+ mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
+ enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
+}
+
+struct mlxsw_sp_port_mapping_event {
+ struct list_head list;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+};
+
+static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ LIST_HEAD(event_queue);
+ u16 local_port;
+ int err;
+
+ events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
+ mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
+ devlink = priv_to_devlink(mlxsw_sp->core);
+
+ spin_lock_bh(&events->queue_lock);
+ list_splice_init(&events->queue, &event_queue);
+ spin_unlock_bh(&events->queue_lock);
+
+ list_for_each_entry_safe(event, next_event, &event_queue, list) {
+ local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
+ err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ event->pmlp_pl, &port_mapping);
+ if (err)
+ goto out;
+
+ if (WARN_ON_ONCE(!port_mapping.width))
+ goto out;
+
+ devl_lock(devlink);
+
+ if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, &port_mapping);
+ else
+ WARN_ON_ONCE(1);
+
+ devl_unlock(devlink);
+
+ mlxsw_sp->port_mapping[local_port] = port_mapping;
+
+out:
+ kfree(event);
+ }
+}
+
+static void
+mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
+ char *pmlp_pl, void *priv)
+{
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping_event *event;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u16 local_port;
+
+ local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+
+ events = &mlxsw_sp->port_mapping_events;
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
+ spin_lock(&events->queue_lock);
+ list_add_tail(&event->list, &events->queue);
+ spin_unlock(&events->queue_lock);
+ mlxsw_core_schedule_work(&events->work);
+}
+
+static void
+__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+
+ events = &mlxsw_sp->port_mapping_events;
+
+ /* Caller needs to make sure that no new event is going to appear. */
+ cancel_work_sync(&events->work);
+ list_for_each_entry_safe(event, next_event, &events->queue, list) {
+ list_del(&event->list);
+ kfree(event);
+ }
+}
+
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+ for (i = 1; i < max_ports; i++)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
+
+ devl_lock(devlink);
+ for (i = 1; i < max_ports; i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
+ devl_unlock(devlink);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
}
+static void
+mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
+ int i;
+
+ for (i = 1; i < max_ports; i++)
+ if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+}
+
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping_events *events;
struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
int i;
@@ -1883,26 +2046,46 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
+ events = &mlxsw_sp->port_mapping_events;
+ INIT_LIST_HEAD(&events->queue);
+ spin_lock_init(&events->queue_lock);
+ INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
+ if (err)
+ goto err_event_enable;
+ }
+
+ devl_lock(devlink);
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
for (i = 1; i < max_ports; i++) {
- port_mapping = mlxsw_sp->port_mapping[i];
- if (!port_mapping)
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ if (!port_mapping->width)
continue;
err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
if (err)
goto err_port_create;
}
+ devl_unlock(devlink);
return 0;
err_port_create:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ i = max_ports;
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
+ devl_unlock(devlink);
+err_event_enable:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
return err;
@@ -1911,12 +2094,12 @@ err_cpu_port_create:
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
int i;
int err;
mlxsw_sp->port_mapping = kcalloc(max_ports,
- sizeof(struct mlxsw_sp_port_mapping *),
+ sizeof(struct mlxsw_sp_port_mapping),
GFP_KERNEL);
if (!mlxsw_sp->port_mapping)
return -ENOMEM;
@@ -1925,36 +2108,20 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
continue;
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
if (err)
goto err_port_module_info_get;
- if (!port_mapping.width)
- continue;
-
- mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
- sizeof(port_mapping),
- GFP_KERNEL);
- if (!mlxsw_sp->port_mapping[i]) {
- err = -ENOMEM;
- goto err_port_module_info_dup;
- }
}
return 0;
err_port_module_info_get:
-err_port_module_info_dup:
- for (i--; i >= 1; i--)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
return err;
}
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
- int i;
-
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
}
@@ -2004,8 +2171,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < count; i++) {
u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- port_mapping = mlxsw_sp->port_mapping[local_port];
- if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
+ port_mapping = &mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
continue;
mlxsw_sp_port_create(mlxsw_sp, local_port,
false, port_mapping);
@@ -2045,7 +2212,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
return -EINVAL;
}
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2080,6 +2248,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
err_port_split_create:
mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
+
return err;
}
@@ -2109,7 +2278,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
count = mlxsw_sp_port->mapping.module_width /
mlxsw_sp_port->mapping.width;
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2300,6 +2470,11 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = {
MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
+static const struct mlxsw_listener mlxsw_sp2_listener[] = {
+ /* Events */
+ MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
+};
+
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -2818,7 +2993,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
@@ -2979,9 +3153,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_sample_trigger_init;
}
- devl_lock(devlink);
err = mlxsw_sp_ports_create(mlxsw_sp);
- devl_unlock(devlink);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
goto err_ports_create;
@@ -3094,6 +3266,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3124,6 +3298,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3154,6 +3330,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3162,12 +3340,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devl_lock(devlink);
mlxsw_sp_ports_remove(mlxsw_sp);
- devl_unlock(devlink);
-
rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
@@ -3645,6 +3819,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3682,6 +3857,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3717,6 +3893,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -5024,3 +5201,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 20588e699588..2ad29ae1c640 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -145,11 +145,18 @@ struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
+ u8 slot_index;
u8 width; /* Number of lanes used by the port */
u8 module_width; /* Number of lanes in the module (static) */
u8 lane;
};
+struct mlxsw_sp_port_mapping_events {
+ struct list_head queue;
+ spinlock_t queue_lock; /* protects queue */
+ struct work_struct work;
+};
+
struct mlxsw_sp_parsing {
refcount_t parsing_depth_ref;
u16 parsing_depth;
@@ -164,7 +171,8 @@ struct mlxsw_sp {
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
- struct mlxsw_sp_port_mapping **port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
+ struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
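
The mlxsw_sp_port_mapping_events structure added above pairs a spinlock-protected list with a work item, so port-mapping events raised in atomic context can be handed off to process context. A minimal, self-contained sketch of that hand-off pattern follows; it is not the driver's code, and every demo_* name is hypothetical.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_event {
	struct list_head list;
	u16 local_port;
};

struct demo_events {
	struct list_head queue;
	spinlock_t queue_lock;	/* protects queue */
	struct work_struct work;
};

/* Consumer: runs in process context and drains the whole queue. */
static void demo_events_work(struct work_struct *work)
{
	struct demo_events *events = container_of(work, struct demo_events,
						  work);
	struct demo_event *ev, *tmp;
	unsigned long flags;
	LIST_HEAD(local);

	spin_lock_irqsave(&events->queue_lock, flags);
	list_splice_init(&events->queue, &local);
	spin_unlock_irqrestore(&events->queue_lock, flags);

	list_for_each_entry_safe(ev, tmp, &local, list) {
		/* ... handle the mapping change for ev->local_port ... */
		list_del(&ev->list);
		kfree(ev);
	}
}

/* Producer: may be called from atomic context (e.g. an event listener). */
static void demo_event_enqueue(struct demo_events *events, u16 local_port)
{
	struct demo_event *ev;
	unsigned long flags;

	ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
	if (!ev)
		return;
	ev->local_port = local_port;

	spin_lock_irqsave(&events->queue_lock, flags);
	list_add_tail(&ev->list, &events->queue);
	spin_unlock_irqrestore(&events->queue_lock, flags);

	schedule_work(&events->work);
}

static void demo_events_init(struct demo_events *events)
{
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, demo_events_work);
}
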
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 31f7f4c3acc3..3b9ba8fa247a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1827,10 +1827,9 @@ static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
void *rule_priv, bool *activity)
{
- struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+ *activity = false;
- return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
- activity);
+ return 0;
}
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 98f26f596e30..c68fc8f7ca99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -202,6 +202,21 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
return 0;
}
+static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_sbxx_dir dir,
+ enum mlxsw_reg_sbpr_mode mode,
+ u32 size, bool infi_size)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+
+ /* The FW default descriptor buffer configuration uses only pool 14 for
+ * descriptors.
+ */
+ mlxsw_reg_sbpr_pack(sbpr_pl, 14, dir, mode, size, infi_size);
+ mlxsw_reg_sbpr_desc_set(sbpr_pl, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+}
+
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 pg_buff, u32 min_buff, u32 max_buff,
bool infi_max, u16 pool_index)
@@ -775,6 +790,17 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
}
+
+ err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+ MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+ MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
+ if (err)
+ return err;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 5f92b1691360..aff6d4f35cd2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -168,8 +168,6 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
struct dcb_app *app)
{
- int prio;
-
if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
netdev_err(dev, "APP entry with priority value %u is invalid\n",
app->priority);
@@ -183,17 +181,6 @@ static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
app->protocol);
return -EINVAL;
}
-
- /* Warn about any DSCP APP entries with the same PID. */
- prio = fls(dcb_ieee_getapp_mask(dev, app));
- if (prio--) {
- if (prio < app->priority)
- netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
- app->priority, app->protocol, prio);
- else if (prio > app->priority)
- netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
- app->priority, app->protocol, prio);
- }
break;
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
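
The removed warning relied on dcb_ieee_getapp_mask() returning a bitmask of priorities already mapped to the APP entry's protocol, with fls() turning that mask into a 1-based "highest set bit". The following hedged sketch (the demo_* name is made up) restates that idiom in isolation.

#include <linux/bitops.h>
#include <linux/types.h>

/* prio_mask: bitmask of priorities already mapped to the protocol,
 * as returned by dcb_ieee_getapp_mask().
 */
static int demo_highest_app_prio(u32 prio_mask)
{
	int prio = fls(prio_mask);	/* 0 when the mask is empty */

	if (!prio--)
		return -1;		/* no previously-active entry */

	return prio;			/* 0-based highest priority */
}
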
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 8b5d7f83b9b0..915dffb85a1c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -568,14 +568,14 @@ struct mlxsw_sp_port_stats {
static u64
mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping;
struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
u64 stats;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_core,
- port_mapping.module,
- &stats);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_core, slot_index,
+ module, &stats);
if (err)
return mlxsw_sp_port->module_overheat_initial_val;
@@ -1036,6 +1036,7 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
return mlxsw_env_get_module_info(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module,
modinfo);
}
@@ -1045,10 +1046,11 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
- mlxsw_sp_port->mapping.module, ee,
- data);
+ return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, slot_index,
+ module, ee, data);
}
static int
@@ -1058,10 +1060,11 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page,
- extack);
+ return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
}
static int
@@ -1202,9 +1205,11 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, slot_index,
+ module, flags);
}
static int
@@ -1214,10 +1219,11 @@ mlxsw_sp_get_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
- extack);
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params, extack);
}
static int
@@ -1227,10 +1233,11 @@ mlxsw_sp_set_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
- params->policy, extack);
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params->policy, extack);
}
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 79deb19e3a19..9ac4f3c00349 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -21,6 +21,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
@@ -507,7 +508,7 @@ struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
struct fib_info *fi;
u32 tb_id;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
@@ -2359,6 +2360,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
goto err_neigh_entry_insert;
mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
+ atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
return neigh_entry;
@@ -2373,6 +2375,7 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
list_del(&neigh_entry->rif_list_node);
+ atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
mlxsw_sp_neigh_entry_free(neigh_entry);
@@ -2570,6 +2573,9 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
char *rauhtd_pl;
int err;
+ if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
+ return 0;
+
rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
if (!rauhtd_pl)
return -ENOMEM;
@@ -2949,6 +2955,7 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_router_neighs_update_work);
INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
mlxsw_sp_router_probe_unresolved_nexthops);
+ atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
return 0;
@@ -5559,7 +5566,7 @@ mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- return !fib4_entry->tos;
+ return !fib4_entry->dscp;
}
static bool
@@ -5620,7 +5627,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -5645,7 +5652,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = should_offload;
fri.trap = !should_offload;
@@ -5668,7 +5675,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = false;
fri.trap = false;
@@ -6250,7 +6257,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
- fib4_entry->tos = fen_info->tos;
+ fib4_entry->dscp = fen_info->dscp;
fib_entry->fib_node = fib_node;
@@ -6304,7 +6311,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib4_entry, common);
if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
+ fib4_entry->dscp == fen_info->dscp &&
fib4_entry->type == fen_info->type &&
fib4_entry->fi == fen_info->fi)
return fib4_entry;
@@ -7010,7 +7017,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
@@ -7019,14 +7026,12 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
if (err)
- goto err_nexthop6_group_update;
+ goto err_rt6_unwind;
return 0;
-err_nexthop6_group_update:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
@@ -7154,7 +7159,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
fib6_entry->nrt6++;
@@ -7162,7 +7167,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
- goto err_nexthop6_group_get;
+ goto err_rt6_unwind;
err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
fib_node->fib);
@@ -7181,10 +7186,8 @@ err_fib6_entry_type_set:
mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
-err_nexthop6_group_get:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
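
Both hunks above collapse two error labels into a single err_rt6_unwind label that pops exactly as many rt6 entries as were appended, using the loop counter itself as the unwind count. A self-contained sketch of that unwind idiom, with hypothetical demo_* names, is shown below.

#include <linux/errno.h>

struct demo_list {
	int count;
	int max;
};

static int demo_append(struct demo_list *dl)
{
	if (dl->count >= dl->max)
		return -ENOSPC;
	dl->count++;
	return 0;
}

static void demo_remove_last(struct demo_list *dl)
{
	dl->count--;
}

static int demo_commit(struct demo_list *dl)
{
	return dl->count ? 0 : -EINVAL;
}

static int demo_add_all(struct demo_list *dl, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = demo_append(dl);
		if (err)
			goto err_unwind;
	}

	err = demo_commit(dl);		/* a step after the loop can fail too */
	if (err)
		goto err_unwind;

	return 0;

err_unwind:
	/* "i" is exactly the number of successful appends; pop that many */
	for (; i > 0; i--)
		demo_remove_last(dl);
	return err;
}
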
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index fa829658a11b..6e704d807a78 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -56,6 +56,7 @@ struct mlxsw_sp_router {
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
+ atomic_t neigh_count;
} neighs_update;
struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
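
The new neigh_count field lets the periodic neighbour-activity worker in spectrum_router.c skip its RAUHTD dump when no neighbours exist. A minimal sketch of that counter-gated polling pattern (demo_* names are invented) follows.

#include <linux/atomic.h>

struct demo_tracker {
	atomic_t object_count;
};

/* Called from the periodic worker; skips the expensive dump when idle. */
static int demo_tracker_poll(struct demo_tracker *t)
{
	if (!atomic_read(&t->object_count))
		return 0;	/* nothing tracked, nothing to query */

	/* ... allocate the dump buffer and query the device here ... */
	return 0;
}

static void demo_tracker_obj_create(struct demo_tracker *t)
{
	atomic_inc(&t->object_count);
}

static void demo_tracker_obj_destroy(struct demo_tracker *t)
{
	atomic_dec(&t->object_count);
}
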
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 3bf12092a8a2..a6d2e806cba9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -207,6 +207,16 @@ static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
}
}
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
+ bool no_delay)
+{
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
+
+ mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
+ msecs_to_jiffies(interval));
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev,
@@ -245,6 +255,8 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
bridge_device->ops = bridge->bridge_8021d_ops;
}
INIT_LIST_HEAD(&bridge_device->mids_list);
+ if (list_empty(&bridge->bridges_list))
+ mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
list_add(&bridge_device->list, &bridge->bridges_list);
/* It is possible we already have VXLAN devices enslaved to the bridge.
@@ -273,6 +285,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
bridge_device->dev);
list_del(&bridge_device->list);
+ if (list_empty(&bridge->bridges_list))
+ cancel_delayed_work(&bridge->fdb_notify.dw);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
@@ -2886,22 +2900,13 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
- bool no_delay)
-{
- struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
- unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
-
- mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
- msecs_to_jiffies(interval));
-}
-
#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp *mlxsw_sp;
+ bool reschedule = false;
char *sfn_pl;
int queries;
u8 num_rec;
@@ -2916,6 +2921,9 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = bridge->mlxsw_sp;
rtnl_lock();
+ if (list_empty(&bridge->bridges_list))
+ goto out;
+ reschedule = true;
queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
while (queries > 0) {
mlxsw_reg_sfn_pack(sfn_pl);
@@ -2935,6 +2943,8 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
out:
rtnl_unlock();
kfree(sfn_pl);
+ if (!reschedule)
+ return;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
@@ -3665,7 +3675,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
- mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
return 0;
err_register_switchdev_blocking_notifier:
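
The switchdev change above converts the FDB notification work from an always-running delayed work into one that is started by the first bridge, reschedules itself only while bridges exist, and is cancelled when the last bridge goes away. The following sketch shows the general shape of that pattern; it is not the driver's code and all demo_* names are hypothetical.

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct demo_poller {
	struct list_head objects;
	struct delayed_work dw;
	unsigned int interval;	/* ms */
};

static void demo_poll_work(struct work_struct *work)
{
	struct demo_poller *p = container_of(work, struct demo_poller,
					     dw.work);
	bool reschedule = false;

	rtnl_lock();
	if (list_empty(&p->objects))
		goto out;
	reschedule = true;
	/* ... poll the hardware here ... */
out:
	rtnl_unlock();
	if (reschedule)
		schedule_delayed_work(&p->dw, msecs_to_jiffies(p->interval));
}

/* First object starts the polling, last object stops it. */
static void demo_object_add(struct demo_poller *p, struct list_head *obj)
{
	if (list_empty(&p->objects))
		schedule_delayed_work(&p->dw, 0);
	list_add(obj, &p->objects);
}

static void demo_object_del(struct demo_poller *p, struct list_head *obj)
{
	list_del(obj);
	if (list_empty(&p->objects))
		cancel_delayed_work(&p->dw);
}

static void demo_poller_init(struct demo_poller *p, unsigned int interval_ms)
{
	INIT_LIST_HEAD(&p->objects);
	INIT_DELAYED_WORK(&p->dw, demo_poll_work);
	p->interval = interval_ms;
}
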
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 47b061b99160..ed4d0d3448f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -864,7 +864,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
.trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
.listeners_arr = {
MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
- false, SP_LLDP, DISCARD),
+ true, SP_LLDP, DISCARD),
},
},
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 9e070ab3ed76..d888498aed33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -133,6 +133,12 @@ enum mlxsw_event_trap_id {
MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
/* PTP Egress FIFO has a new entry */
MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E,
+ /* Downstream Device Status Change */
+ MLXSW_TRAP_ID_DSDSC = 0x321,
+ /* Binary Code Transfer Operation Executed Event */
+ MLXSW_TRAP_ID_BCTOE = 0x322,
+ /* Port mapping change */
+ MLXSW_TRAP_ID_PMLPE = 0x32E,
};
#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index a9ffc719aa0e..fd2e0ebb2427 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
- lan966x_ptp.o
+ lan966x_ptp.o lan966x_fdma.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
new file mode 100644
index 000000000000..9e2a7323eaf0
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
+}
+
+static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
+ struct lan966x_db *db)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ page = dev_alloc_pages(rx->page_order);
+ if (unlikely(!page))
+ return NULL;
+
+ dma_addr = dma_map_page(lan966x->dev, page, 0,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
+ goto free_page;
+
+ db->dataptr = dma_addr;
+
+ return page;
+
+free_page:
+ __free_pages(page, rx->page_order);
+ return NULL;
+}
+
+static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ int i, j;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ dma_unmap_single(lan966x->dev,
+ (dma_addr_t)db->dataptr,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rx->page[i][j], rx->page_order);
+ }
+ }
+}
+
+static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
+ struct lan966x_rx_dcb *dcb,
+ u64 nextptr)
+{
+ struct lan966x_db *db;
+ int i;
+
+ for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
+ db = &dcb->db[i];
+ db->status = FDMA_DCB_STATUS_INTR;
+ }
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
+
+ rx->last_entry->nextptr = nextptr;
+ rx->last_entry = dcb;
+}
+
+static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ struct page *page;
+ int i, j;
+ int size;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+
+ rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
+ if (!rx->dcbs)
+ return -ENOMEM;
+
+ rx->last_entry = rx->dcbs;
+ rx->db_index = 0;
+ rx->dcb_index = 0;
+
+ /* Now for each dcb allocate the dbs */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+ dcb->info = 0;
+
+ /* For each db allocate a page and map it to the DB dataptr. */
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (!page)
+ return -ENOMEM;
+
+ db->status = 0;
+ rx->page[i][j] = page;
+ }
+
+ lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+ }
+
+ return 0;
+}
+
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 size;
+
+ /* Now it is possible to clean up the dcbs */
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 mask;
+
+ /* When activating a channel, the address of the first DCB must be
+ * written before the channel is activated
+ */
+ lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP(rx->channel_id));
+ lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP1(rx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(rx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+ FDMA_PORT_CTRL_XTR_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(rx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(rx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+ struct lan966x_tx_dcb *dcb)
+{
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+ struct lan966x_db *db;
+ int size;
+ int i, j;
+
+ tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ GFP_KERNEL);
+ if (!tx->dcbs_buf)
+ return -ENOMEM;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+ if (!tx->dcbs)
+ goto out;
+
+ /* Now for each dcb allocate the db */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &tx->dcbs[i];
+
+ for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ db->dataptr = 0;
+ db->status = 0;
+ }
+
+ lan966x_fdma_tx_add_dcb(tx, dcb);
+ }
+
+ return 0;
+
+out:
+ kfree(tx->dcbs_buf);
+ return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ int size;
+
+ kfree(tx->dcbs_buf);
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 mask;
+
+ /* When activating a channel, the address of the first DCB must be
+ * written before the channel is activated
+ */
+ lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP(tx->channel_id));
+ lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP1(tx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(tx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+ FDMA_PORT_CTRL_INJ_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(tx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(tx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+
+ tx->activated = false;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+
+ /* Write the registers to reload the channel */
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ if (netif_queue_stopped(port->dev))
+ netif_wake_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ netif_stop_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ struct lan966x_db *db;
+ unsigned long flags;
+ bool clear = false;
+ int i;
+
+ spin_lock_irqsave(&lan966x->tx_lock, flags);
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+
+ if (!dcb_buf->used)
+ continue;
+
+ db = &tx->dcbs[i].db[0];
+ if (!(db->status & FDMA_DCB_STATUS_DONE))
+ continue;
+
+ dcb_buf->dev->stats.tx_packets++;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+
+ dcb_buf->used = false;
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->skb->len,
+ DMA_TO_DEVICE);
+ if (!dcb_buf->ptp)
+ dev_kfree_skb_any(dcb_buf->skb);
+
+ clear = true;
+ }
+
+ if (clear)
+ lan966x_fdma_wakeup_netdev(lan966x);
+
+ spin_unlock_irqrestore(&lan966x->tx_lock, flags);
+}
+
+static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
+{
+ struct lan966x_db *db;
+
+ /* Check if there is any data */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
+ return false;
+
+ return true;
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u64 src_port, timestamp;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+
+ /* Get the received frame and unmap it */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+ skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ if (unlikely(!skb))
+ goto unmap_page;
+
+ dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+ skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
+
+ lan966x_ifh_get_src_port(skb->data, &src_port);
+ lan966x_ifh_get_timestamp(skb->data, &timestamp);
+
+ WARN_ON(src_port >= lan966x->num_phys_ports);
+
+ skb->dev = lan966x->ports[src_port]->dev;
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+unmap_page:
+ dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+ __free_pages(page, rx->page_order);
+
+ return NULL;
+}
+
+static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
+ struct lan966x_rx *rx = &lan966x->rx;
+ int dcb_reload = rx->dcb_index;
+ struct lan966x_rx_dcb *old_dcb;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+ int counter = 0;
+ u64 nextptr;
+
+ lan966x_fdma_tx_clear_buf(lan966x, weight);
+
+ /* Get all received skb */
+ while (counter < weight) {
+ if (!lan966x_fdma_rx_more_frames(rx))
+ break;
+
+ skb = lan966x_fdma_rx_get_frame(rx);
+
+ rx->page[rx->dcb_index][rx->db_index] = NULL;
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+
+ if (!skb)
+ break;
+
+ napi_gro_receive(&lan966x->napi, skb);
+ counter++;
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != rx->dcb_index) {
+ db = &rx->dcbs[dcb_reload].db[rx->db_index];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (unlikely(!page))
+ break;
+ rx->page[dcb_reload][rx->db_index] = page;
+
+ old_dcb = &rx->dcbs[dcb_reload];
+ dcb_reload++;
+ dcb_reload &= FDMA_DCB_MAX - 1;
+
+ nextptr = rx->dma + ((unsigned long)old_dcb -
+ (unsigned long)rx->dcbs);
+ lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
+ lan966x_fdma_rx_reload(rx);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ u32 db, err, err_type;
+
+ db = lan_rd(lan966x, FDMA_INTR_DB);
+ err = lan_rd(lan966x, FDMA_INTR_ERR);
+
+ if (db) {
+ lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+ lan_wr(db, lan966x, FDMA_INTR_DB);
+
+ napi_schedule(&lan966x->napi);
+ }
+
+ if (err) {
+ err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+ WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+ lan_wr(err, lan966x, FDMA_INTR_ERR);
+ lan_wr(err_type, lan966x, FDMA_ERRORS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ int i;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+ if (!dcb_buf->used && i != tx->last_in_use)
+ return i;
+ }
+
+ return -1;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx_dcb *next_dcb, *dcb;
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_db *next_db;
+ int needed_headroom;
+ int needed_tailroom;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ int err;
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* skb processing */
+ needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+ skb_push(skb, IFH_LEN * sizeof(u32));
+ memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
+ skb_put(skb, 4);
+
+ dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+
+ /* Setup next dcb */
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len);
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->skb = skb;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = dev;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ next_dcb_buf->ptp = true;
+
+ if (likely(lan966x->tx.activated)) {
+ /* Connect current dcb to the next db */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+ /* This is the first use of the channel, so just activate it */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+ /* Move to the next dcb, because this one is now the last in use */
+ tx->last_in_use = next_to_use;
+
+ return NETDEV_TX_OK;
+
+release:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
+{
+ int max_mtu = 0;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ int mtu;
+
+ if (!lan966x->ports[i])
+ continue;
+
+ mtu = lan966x->ports[i]->dev->mtu;
+ if (mtu > max_mtu)
+ max_mtu = mtu;
+ }
+
+ return max_mtu;
+}
+
+static int lan966x_qsys_sw_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
+}
+
+static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
+{
+ void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
+ dma_addr_t rx_dma, tx_dma;
+ u32 size;
+ int err;
+
+ /* Store these for later to free them */
+ rx_dma = lan966x->rx.dma;
+ tx_dma = lan966x->tx.dma;
+ rx_dcbs = lan966x->rx.dcbs;
+ tx_dcbs = lan966x->tx.dcbs;
+ tx_dcbs_buf = lan966x->tx.dcbs_buf;
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+ lan966x_fdma_stop_netdev(lan966x);
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ goto restore;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+
+ lan966x_fdma_tx_disable(&lan966x->tx);
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err)
+ goto restore_tx;
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);
+
+ kfree(tx_dcbs_buf);
+
+ lan966x_fdma_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+restore:
+ lan966x->rx.dma = rx_dma;
+ lan966x->tx.dma = tx_dma;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+restore_tx:
+ lan966x->rx.dcbs = rx_dcbs;
+ lan966x->tx.dcbs = tx_dcbs;
+ lan966x->tx.dcbs_buf = tx_dcbs_buf;
+
+ return err;
+}
+
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+ int err;
+ u32 val;
+
+ max_mtu = lan966x_fdma_get_max_mtu(lan966x);
+ max_mtu += IFH_LEN * sizeof(u32);
+
+ if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
+ lan966x->rx.page_order)
+ return 0;
+
+ /* Disable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Flush the CPU queues */
+ readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
+ val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ /* Add a sleep in case there are frames between the queues and the CPU
+ * port
+ */
+ usleep_range(1000, 2000);
+
+ err = lan966x_fdma_reload(lan966x, max_mtu);
+
+ /* Enable back the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ return err;
+}
+
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev)
+ return;
+
+ lan966x->fdma_ndev = dev;
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&lan966x->napi);
+}
+
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev == dev) {
+ netif_napi_del(&lan966x->napi);
+ lan966x->fdma_ndev = NULL;
+ }
+}
+
+int lan966x_fdma_init(struct lan966x *lan966x)
+{
+ int err;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ lan966x->rx.lan966x = lan966x;
+ lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->tx.lan966x = lan966x;
+ lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.last_in_use = -1;
+
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ return err;
+
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err) {
+ lan966x_fdma_rx_free(&lan966x->rx);
+ return err;
+ }
+
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return 0;
+}
+
+void lan966x_fdma_deinit(struct lan966x *lan966x)
+{
+ if (!lan966x->fdma)
+ return;
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_tx_disable(&lan966x->tx);
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x_fdma_rx_free(&lan966x->rx);
+ lan966x_fdma_tx_free(&lan966x->tx);
+}
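
Two pieces of arithmetic recur throughout lan966x_fdma.c: ring indices wrap with a power-of-two mask, and the device-visible address of a DCB is the coherent DMA base plus the entry's byte offset in the array. A short, hedged restatement (demo_* names are made up):

#include <linux/types.h>

#define DEMO_DCB_MAX 512	/* must stay a power of two for the mask trick */

struct demo_dcb {
	u64 nextptr;
	u64 info;
};

/* 510 -> 511 -> 0 -> 1 -> ... */
static inline int demo_ring_next(int index)
{
	return (index + 1) & (DEMO_DCB_MAX - 1);
}

/* Device-visible address of one DCB inside the coherent array. */
static inline u64 demo_dcb_hw_addr(dma_addr_t base, struct demo_dcb *dcbs,
				   struct demo_dcb *dcb)
{
	return base + ((unsigned long)dcb - (unsigned long)dcbs);
}
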
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 95830e3e2b1f..138718f33dbd 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -24,9 +24,6 @@
#define XTR_NOT_READY 0x07000080U
#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
-#define READL_SLEEP_US 10
-#define READL_TIMEOUT_US 100000000
-
#define IO_RANGES 2
static const struct of_device_id lan966x_match[] = {
@@ -43,6 +40,7 @@ struct lan966x_main_io_resource {
static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
{ TARGET_ORG, 0, 1 }, /* 0xe2000000 */
{ TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
{ TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
@@ -343,7 +341,10 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock(&lan966x->tx_lock);
- err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ if (port->lan966x->fdma)
+ err = lan966x_fdma_xmit(skb, ifh, dev);
+ else
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
spin_unlock(&lan966x->tx_lock);
return err;
@@ -353,12 +354,24 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
+ int old_mtu = dev->mtu;
+ int err;
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
dev->mtu = new_mtu;
- return 0;
+ if (!lan966x->fdma)
+ return 0;
+
+ err = lan966x_fdma_change_mtu(lan966x);
+ if (err) {
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = old_mtu;
+ }
+
+ return err;
}
static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
@@ -432,8 +445,7 @@ bool lan966x_netdevice_check(const struct net_device *dev)
return dev->netdev_ops == &lan966x_port_netdev_ops;
}
-static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
- struct sk_buff *skb)
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
{
u32 val;
@@ -520,7 +532,7 @@ static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
}
}
-static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
@@ -532,7 +544,7 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len)
IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
-static void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
{
packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
@@ -652,6 +664,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (port->dev)
unregister_netdev(port->dev);
+ if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
+ lan966x_fdma_netdev_deinit(lan966x, port->dev);
+
if (port->phylink) {
rtnl_lock();
lan966x_port_stop(port->dev);
@@ -672,8 +687,14 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
lan966x->ana_irq = -ENXIO;
}
+ if (lan966x->fdma)
+ devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
+
if (lan966x->ptp_irq)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
+
+ if (lan966x->ptp_ext_irq)
+ devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
@@ -802,12 +823,12 @@ static void lan966x_init(struct lan966x *lan966x)
/* Do byte-swap and expect status after last data word
* Extraction: Mode: manual extraction | Byte_swap
*/
- lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_XTR_GRP_CFG(0));
/* Injection: Mode: manual injection | Byte_swap */
- lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_INJ_GRP_CFG(0));
@@ -916,7 +937,7 @@ static int lan966x_ram_init(struct lan966x *lan966x)
static int lan966x_reset_switch(struct lan966x *lan966x)
{
- struct reset_control *switch_reset, *phy_reset;
+ struct reset_control *switch_reset;
int val = 0;
int ret;
@@ -925,13 +946,7 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
"Could not obtain switch reset");
- phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy");
- if (IS_ERR(phy_reset))
- return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset),
- "Could not obtain phy reset\n");
-
reset_control_reset(switch_reset);
- reset_control_reset(phy_reset);
lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
@@ -1029,6 +1044,31 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x->ptp = 1;
}
+ lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
+ if (lan966x->fdma_irq > 0) {
+ err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
+ lan966x_fdma_irq_handler, 0,
+ "fdma irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");
+
+ lan966x->fdma = true;
+ }
+
+ if (lan966x->ptp) {
+ lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
+ if (lan966x->ptp_ext_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev,
+ lan966x->ptp_ext_irq, NULL,
+ lan966x_ptp_ext_irq_handler,
+ IRQF_ONESHOT,
+ "ptp-ext irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Unable to use ptp-ext irq");
+ }
+ }
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1067,8 +1107,15 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
goto cleanup_fdb;
+ err = lan966x_fdma_init(lan966x);
+ if (err)
+ goto cleanup_ptp;
+
return 0;
+cleanup_ptp:
+ lan966x_ptp_deinit(lan966x);
+
cleanup_fdb:
lan966x_fdb_deinit(lan966x);
@@ -1088,6 +1135,7 @@ static int lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
+ lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
cancel_delayed_work_sync(&lan966x->stats_work);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index ae282da1da74..3b86ddddc756 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -17,6 +17,9 @@
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
#define LAN966X_BUFFER_CELL_SZ 64
#define LAN966X_BUFFER_MEMORY (160 * 1024)
#define LAN966X_BUFFER_MIN_SZ 60
@@ -53,11 +56,28 @@
#define LAN966X_PHC_COUNT 3
#define LAN966X_PHC_PORT 0
+#define LAN966X_PHC_PINS_NUM 7
#define IFH_REW_OP_NOOP 0x0
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define FDMA_RX_DCB_MAX_DBS 1
+#define FDMA_TX_DCB_MAX_DBS 1
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
+#define FDMA_DCB_MAX 512
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -73,6 +93,83 @@ enum macaccess_entry_type {
struct lan966x_port;
+struct lan966x_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct lan966x_rx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
+};
+
+struct lan966x_tx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
+};
+
+struct lan966x_rx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the array of hardware dcbs. */
+ struct lan966x_rx_dcb *dcbs;
+
+ /* Pointer to the last address in the dcbs. */
+ struct lan966x_rx_dcb *last_entry;
+
+ /* For each DB, there is a page */
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+
+ /* Represents the db_index; it can have a value between 0 and
+ * FDMA_RX_DCB_MAX_DBS. Once it reaches FDMA_RX_DCB_MAX_DBS, the DCB can
+ * be reused.
+ */
+ int db_index;
+
+ /* Represents the index in the dcbs. It has a value between 0 and
+ * FDMA_DCB_MAX
+ */
+ int dcb_index;
+
+ /* Represents the dma address to the dcbs array */
+ dma_addr_t dma;
+
+ /* Represents the page order that is used to allocate the pages for the
+ * RX buffers. This value is calculated based on max MTU of the devices.
+ */
+ u8 page_order;
+
+ u8 channel_id;
+};
+
+struct lan966x_tx_dcb_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
+};
+
+struct lan966x_tx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the dcb list */
+ struct lan966x_tx_dcb *dcbs;
+ u16 last_in_use;
+
+ /* Represents the DMA address to the first entry of the dcb entries. */
+ dma_addr_t dma;
+
+ /* Array of dcbs that are given to the HW */
+ struct lan966x_tx_dcb_buf *dcbs_buf;
+
+ u8 channel_id;
+
+ bool activated;
+};
+
struct lan966x_stat_layout {
u32 offset;
char name[ETH_GSTRING_LEN];
@@ -81,6 +178,7 @@ struct lan966x_stat_layout {
struct lan966x_phc {
struct ptp_clock *clock;
struct ptp_clock_info info;
+ struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM];
struct hwtstamp_config hwtstamp_config;
struct lan966x *lan966x;
u8 index;
@@ -134,6 +232,8 @@ struct lan966x {
int xtr_irq;
int ana_irq;
int ptp_irq;
+ int fdma_irq;
+ int ptp_ext_irq;
/* workqueue for fdb */
struct workqueue_struct *fdb_work;
@@ -150,6 +250,13 @@ struct lan966x {
spinlock_t ptp_ts_id_lock; /* lock for ts_id */
struct mutex ptp_lock; /* lock for ptp interface state */
u16 ptp_skbs;
+
+ /* fdma */
+ bool fdma;
+ struct net_device *fdma_ndev;
+ struct lan966x_rx rx;
+ struct lan966x_tx tx;
+ struct napi_struct napi;
};
struct lan966x_port_config {
@@ -195,6 +302,11 @@ bool lan966x_netdevice_check(const struct net_device *dev);
void lan966x_register_notifier_blocks(void);
void lan966x_unregister_notifier_blocks(void);
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
int lan966x_stats_init(struct lan966x *lan966x);
@@ -283,6 +395,15 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
struct sk_buff *skb);
irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_change_mtu(struct lan966x *lan966x);
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
+int lan966x_fdma_init(struct lan966x *lan966x);
+void lan966x_fdma_deinit(struct lan966x *lan966x);
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
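
Assuming the FDMA_DCB_STATUS_* macros added to lan966x_main.h above, the following hypothetical helpers (demo_* names are not part of the patch) restate how a TX database status word is composed and how an RX one is decoded, mirroring what lan966x_fdma_xmit() and lan966x_fdma_rx_get_frame() do inline.

#include <linux/types.h>

#include "lan966x_main.h"	/* for the FDMA_DCB_STATUS_* macros above */

/* Compose the status word of a single-block TX frame. */
static inline u64 demo_tx_db_status(u32 frame_len)
{
	return FDMA_DCB_STATUS_SOF |		/* first block of the frame */
	       FDMA_DCB_STATUS_EOF |		/* last block of the frame */
	       FDMA_DCB_STATUS_INTR |		/* interrupt on completion */
	       FDMA_DCB_STATUS_BLOCKO(0) |	/* no offset into the buffer */
	       FDMA_DCB_STATUS_BLOCKL(frame_len);
}

/* Decode an RX status word: done bit plus received length in bits 15:0. */
static inline bool demo_rx_db_done(u64 status, u32 *frame_len)
{
	if (!(status & FDMA_DCB_STATUS_DONE))
		return false;

	*frame_len = FDMA_DCB_STATUS_BLOCKL(status);
	return true;
}
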
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 237555845a52..f141644e4372 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -393,6 +393,9 @@ void lan966x_port_init(struct lan966x_port *port)
lan966x_port_config_down(port);
+ if (lan966x->fdma)
+ lan966x_fdma_netdev_init(lan966x, port->dev);
+
if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
return;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 0a1041da4384..3a621c5165bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -16,7 +16,7 @@
*/
#define LAN966X_1PPB_FORMAT 3480517749LL
-#define TOD_ACC_PIN 0x5
+#define TOD_ACC_PIN 0x7
enum {
PTP_PIN_ACTION_IDLE = 0,
@@ -321,6 +321,63 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
return IRQ_HANDLED;
}
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ struct lan966x_phc *phc;
+ unsigned long flags;
+ u64 time = 0;
+ time64_t s;
+ int pin, i;
+ s64 ns;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR)))
+ return IRQ_NONE;
+
+ /* Go through all domains and see which pin generated the interrupt */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ struct ptp_clock_event ptp_event = {0};
+
+ phc = &lan966x->phc[i];
+ pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0);
+ if (pin == -1)
+ continue;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin)))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Clear the interrupt by writing 1 to the bit, so that a new
+ * interrupt can be raised
+ */
+ lan_wr(BIT(pin), lan966x, PTP_PIN_INTR);
+
+ /* Get current time */
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(pin));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+ time = ktime_set(s, ns);
+
+ ptp_event.index = pin;
+ ptp_event.timestamp = time;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(phc->clock, &ptp_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
@@ -493,6 +550,207 @@ static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
+static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct ptp_clock_info *info;
+ int i;
+
+ /* Currently support only 1 channel */
+ if (chan != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ case PTP_PF_EXTTS:
+ break;
+ default:
+ return -1;
+ }
+
+ /* The PTP pins are shared by all the PHCs, so check whether the pin is
+ * already connected to another PHC. A pin is considered connected to
+ * another PHC if it already has a function assigned on that PHC.
+ */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ info = &lan966x->phc[i].info;
+
+ /* Skip the check against ourselves */
+ if (ptp == info)
+ continue;
+
+ if (info->pin_config[pin].func == PTP_PF_PEROUT ||
+ info->pin_config[pin].func == PTP_PF_EXTTS)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct timespec64 ts_phase, ts_period;
+ unsigned long flags;
+ s64 wf_high, wf_low;
+ bool pps = false;
+ int pin;
+
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ if (!on) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ if (rq->perout.period.sec == 1 &&
+ rq->perout.period.nsec == 0)
+ pps = true;
+
+ if (rq->perout.flags & PTP_PEROUT_PHASE) {
+ ts_phase.tv_sec = rq->perout.phase.sec;
+ ts_phase.tv_nsec = rq->perout.phase.nsec;
+ } else {
+ ts_phase.tv_sec = rq->perout.start.sec;
+ ts_phase.tv_nsec = rq->perout.start.nsec;
+ }
+
+ if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+ dev_warn(lan966x->dev,
+ "Absolute time not supported!\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+
+ wf_high = timespec64_to_ns(&ts_on);
+ } else {
+ wf_high = 5000;
+ }
+
+ if (pps) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(3),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ wf_low = timespec64_to_ns(&ts_period);
+ wf_low -= wf_high;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ int pin;
+ u32 val;
+
+ if (lan966x->ptp_ext_irq <= 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
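+ /* Configure the pin to save the PHC time on the external event and
+ * enable the per-pin interrupt so the timestamp can be delivered.
+ */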
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SELECT_SET(pin),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_SYNC |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SELECT,
+ lan966x, PTP_PIN_CFG(pin));
+
+ val = lan_rd(lan966x, PTP_PIN_INTR_ENA);
+ if (on)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+ lan_wr(val, lan966x, PTP_PIN_INTR_ENA);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan966x_ptp_perout(ptp, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan966x_ptp_extts(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct ptp_clock_info lan966x_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "lan966x ptp",
@@ -501,6 +759,11 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.settime64 = lan966x_ptp_settime64,
.adjtime = lan966x_ptp_adjtime,
.adjfine = lan966x_ptp_adjfine,
+ .verify = lan966x_ptp_verify,
+ .enable = lan966x_ptp_enable,
+ .n_per_out = LAN966X_PHC_PINS_NUM,
+ .n_ext_ts = LAN966X_PHC_PINS_NUM,
+ .n_pins = LAN966X_PHC_PINS_NUM,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
@@ -508,8 +771,19 @@ static int lan966x_ptp_phc_init(struct lan966x *lan966x,
struct ptp_clock_info *clock_info)
{
struct lan966x_phc *phc = &lan966x->phc[index];
+ struct ptp_pin_desc *p;
+ int i;
+
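+ /* Expose the shared PTP pins, with no function assigned by default */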
+ for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) {
+ p = &phc->pins[i];
+
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
phc->info = *clock_info;
+ phc->info.pin_config = &phc->pins[0];
phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
if (IS_ERR(phc->clock))
return PTR_ERR(phc->clock);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 0c0b3e173d53..8265ad89f0bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -17,6 +17,7 @@ enum lan966x_target {
TARGET_CHIP_TOP = 5,
TARGET_CPU = 6,
TARGET_DEV = 13,
+ TARGET_FDMA = 21,
TARGET_GCB = 27,
TARGET_ORG = 36,
TARGET_PTP = 41,
@@ -578,6 +579,129 @@ enum lan966x_target {
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_CH_DB_DISCARD */
+#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4)
+
+#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\
+ FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\
+ FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVE */
+#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(3)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0)
+#define PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_INTR_PTP, x)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0)
+#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x)
+#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x)
+
/* PTP:PTP_CFG:PTP_DOM_CFG */
#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
@@ -611,6 +735,12 @@ enum lan966x_target {
#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21)
+#define PTP_PIN_CFG_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x)
+#define PTP_PIN_CFG_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x)
+
#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
#define PTP_PIN_CFG_PIN_DOM_SET(x)\
FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
@@ -638,6 +768,22 @@ enum lan966x_target {
#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
+/* PTP:PTP_PINS:WF_HIGH_PERIOD */
+#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 24, 0, 1, 4)
+
+#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0)
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0))
+
+/* PTP:PTP_PINS:WF_LOW_PERIOD */
+#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 28, 0, 1, 4)
+
+#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0)
+#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0))
+
/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 5389fffc694a..3429660cd2e5 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -396,6 +396,11 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
u32 mact_entry;
int res, err;
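+ /* Host MDB entries target the bridge device itself; learn them towards
+ * the CPU port group.
+ */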
+ if (netif_is_bridge_master(v->obj.orig_dev)) {
+ sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
+ return 0;
+ }
+
/* When VLAN unaware the vlan value is not parsed and we receive vid 0.
* Fall back to bridge vid 1.
*/
@@ -461,6 +466,11 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
u32 mact_entry, res, pgid_entry[3];
int err;
+ if (netif_is_bridge_master(v->obj.orig_dev)) {
+ sparx5_mact_forget(spx5, v->addr, v->vid);
+ return 0;
+ }
+
if (!br_vlan_enabled(spx5->hw_bridge_dev))
vid = 1;
else
@@ -500,6 +510,7 @@ static int sparx5_handle_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
err = sparx5_handle_port_mdb_add(dev, nb,
SWITCHDEV_OBJ_PORT_MDB(obj));
break;
@@ -552,6 +563,7 @@ static int sparx5_handle_port_obj_del(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
err = sparx5_handle_port_mdb_del(dev, nb,
SWITCHDEV_OBJ_PORT_MDB(obj));
break;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ca71b62a44dc..0825a92599a5 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -3228,6 +3228,7 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
+ const struct ocelot_stat_layout *stat;
char queue_name[32];
int i, ret;
u32 port;
@@ -3240,6 +3241,10 @@ int ocelot_init(struct ocelot *ocelot)
}
}
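+ /* Count the entries in the stat layout, which is now terminated by
+ * OCELOT_STAT_END rather than sized by the caller.
+ */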
+ ocelot->num_stats = 0;
+ for_each_stat(ocelot, stat)
+ ocelot->num_stats++;
+
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index a65606bb84a0..7e1f67be38f5 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -20,7 +20,7 @@
/* Default policer order */
#define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */
-int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix,
struct qos_policer_conf *conf)
{
u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
@@ -102,26 +102,30 @@ int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
/* Check limits */
if (pir > GENMASK(15, 0)) {
- dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n",
- port, pir, GENMASK(15, 0));
+ dev_err(ocelot->dev,
+ "Invalid pir for policer %u: %u (max %lu)\n",
+ pol_ix, pir, GENMASK(15, 0));
return -EINVAL;
}
if (cir > GENMASK(15, 0)) {
- dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n",
- port, cir, GENMASK(15, 0));
+ dev_err(ocelot->dev,
+ "Invalid cir for policer %u: %u (max %lu)\n",
+ pol_ix, cir, GENMASK(15, 0));
return -EINVAL;
}
if (pbs > pbs_max) {
- dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n",
- port, pbs, pbs_max);
+ dev_err(ocelot->dev,
+ "Invalid pbs for policer %u: %u (max %u)\n",
+ pol_ix, pbs, pbs_max);
return -EINVAL;
}
if (cbs > cbs_max) {
- dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n",
- port, cbs, cbs_max);
+ dev_err(ocelot->dev,
+ "Invalid cbs for policer %u: %u (max %u)\n",
+ pol_ix, cbs, cbs_max);
return -EINVAL;
}
@@ -211,7 +215,7 @@ int ocelot_port_policer_add(struct ocelot *ocelot, int port,
dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n",
__func__, port, pp.pir, pp.pbs);
- err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
+ err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -235,7 +239,7 @@ int ocelot_port_policer_del(struct ocelot *ocelot, int port)
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
+ err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp);
if (err)
return err;
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index 7552995f8b17..0749f23684f2 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -31,7 +31,7 @@ struct qos_policer_conf {
u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
};
-int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix,
struct qos_policer_conf *conf);
int ocelot_policer_validate(const struct flow_action *action,
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index c8701ac955a8..cdbe29f2ddc7 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -672,12 +672,10 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix / 2;
u32 type;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@ -813,11 +811,9 @@ static void es0_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@ -918,7 +914,7 @@ int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
if (!tmp)
return -ENOMEM;
- ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ ret = qos_policer_conf_set(ocelot, pol_ix, &pp);
if (ret) {
kfree(tmp);
return ret;
@@ -949,7 +945,7 @@ int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
if (z) {
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ return qos_policer_conf_set(ocelot, pol_ix, &pp);
}
return 0;
@@ -997,8 +993,8 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
+ struct list_head *pos = &block->rules;
struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *n;
int ret;
ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack);
@@ -1007,17 +1003,13 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
block->count++;
- if (list_empty(&block->rules)) {
- list_add(&filter->list, &block->rules);
- return 0;
- }
-
- list_for_each_safe(pos, n, &block->rules) {
- tmp = list_entry(pos, struct ocelot_vcap_filter, list);
- if (filter->prio < tmp->prio)
+ list_for_each_entry(tmp, &block->rules, list) {
+ if (filter->prio < tmp->prio) {
+ pos = &tmp->list;
break;
+ }
}
- list_add(&filter->list, pos->prev);
+ list_add_tail(&filter->list, pos);
return 0;
}
@@ -1402,22 +1394,18 @@ static void ocelot_vcap_detect_constants(struct ocelot *ocelot,
int ocelot_vcap_init(struct ocelot *ocelot)
{
- int i;
+ struct qos_policer_conf cpu_drop = {
+ .mode = MSCC_QOS_RATE_MODE_DATA,
+ };
+ int ret, i;
/* Create a policer that will drop the frames for the cpu.
* This policer will be used as action in the acl rules to drop
* frames.
*/
- ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
- OCELOT_POLICER_DISCARD);
+ ret = qos_policer_conf_set(ocelot, OCELOT_POLICER_DISCARD, &cpu_drop);
+ if (ret)
+ return ret;
for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) {
struct ocelot_vcap_block *block = &ocelot->block[i];
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 4f4a495a60ad..961f803aca19 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -190,6 +190,7 @@ static const struct ocelot_stat_layout ocelot_stats_layout[] = {
{ .name = "drop_green_prio_5", .offset = 0x8F, },
{ .name = "drop_green_prio_6", .offset = 0x90, },
{ .name = "drop_green_prio_7", .offset = 0x91, },
+ OCELOT_STAT_END
};
static void ocelot_pll5_init(struct ocelot *ocelot)
@@ -227,7 +228,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->map = ocelot_regmap;
ocelot->stats_layout = ocelot_stats_layout;
- ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 21d2645885ce..fe5e77330f5f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -579,7 +579,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
int status;
unsigned i;
- if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
+ if (request_firmware(&fw, mgp->fw_name, dev) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
mgp->fw_name);
status = -EINVAL;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 82a22711ce45..50bca486a244 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -989,8 +989,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) readl(ee_addr)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index aa7c093f1f91..db4dfae8c01d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -4351,7 +4351,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
}
ll_config->tx_steering_type = TX_MULTIQ_STEERING;
ll_config->intr_type = MSI_X;
- ll_config->napi_weight = NEW_NAPI_WEIGHT;
+ ll_config->napi_weight = NAPI_POLL_WEIGHT;
ll_config->rth_steering = RTH_STEERING;
/* get the default configuration parameters */
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 63f65193dd49..da9d2c191828 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -167,8 +167,6 @@ struct macInfo {
struct vxge_config {
int tx_pause_enable;
int rx_pause_enable;
-
-#define NEW_NAPI_WEIGHT 64
int napi_weight;
int intr_type;
#define INTA 0
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 84d66d138c3d..78368e71ce83 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -289,7 +289,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
req_sz = sizeof(struct nfp_crypto_req_add_v6);
ipv6 = true;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index bfd7d1c35076..1edcd9f86c9c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -83,6 +83,10 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
entry2->rule->match.dissector->used_keys;
bool out;
+ if (entry1->netdev && entry2->netdev &&
+ entry1->netdev != entry2->netdev)
+ return -EINVAL;
+
/* check the overlapped fields one by one, the unmasked part
* should not conflict with each other.
*/
@@ -914,7 +918,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
/* Check that the two tc flows are also compatible with
* the nft entry. No need to check the pre_ct and post_ct
* entries as that was already done during pre_merge.
- * The nft entry does not have a netdev or chain populated, so
+ * The nft entry does not have a chain populated, so
* skip this check.
*/
err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
@@ -999,8 +1003,6 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
pre_ct_entry = ct_entry2;
}
- if (post_ct_entry->netdev != pre_ct_entry->netdev)
- return -EINVAL;
/* Checks that the chain_index of the filter matches the
* chain_index of the GOTO action.
*/
@@ -1114,6 +1116,20 @@ err_tc_merge_tb_init:
return ERR_PTR(err);
}
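+/* Look up the ingress net_device from the flow rule's meta match, if any */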
+static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+ struct flow_match_meta match;
+
+ flow_rule_match_meta(rule, &match);
+ if (match.key->ingress_ifindex & match.mask->ingress_ifindex)
+ return __dev_get_by_index(&init_net,
+ match.key->ingress_ifindex);
+ }
+
+ return NULL;
+}
+
static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
struct net_device *netdev,
@@ -1154,6 +1170,9 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
entry->rule->match.dissector = &nft_match->dissector;
entry->rule->match.mask = &nft_match->mask;
entry->rule->match.key = &nft_match->key;
+
+ if (!netdev)
+ netdev = get_netdev_from_rule(entry->rule);
} else {
entry->rule->match.dissector = flow->rule->match.dissector;
entry->rule->match.mask = flow->rule->match.mask;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b412670d89b2..5528d12d1f48 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2259,8 +2259,12 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH;
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
- if (nn->cap & NFP_NET_CFG_CTRL_LSO)
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 3fdaaf8ed2ba..4627715a5e32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -95,15 +95,17 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
+ u16 update = NFP_NET_VF_CFG_MB_UPD_VLAN;
+ bool is_proto_sup = true;
unsigned int vf_offset;
- u16 vlan_tci;
+ u32 vlan_tag;
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
if (err)
return err;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (!eth_type_vlan(vlan_proto))
return -EOPNOTSUPP;
if (vlan > 4095 || qos > 7) {
@@ -112,14 +114,32 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
return -EINVAL;
}
+ /* Check whether the firmware supports configuring the VLAN protocol */
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto");
+ if (err)
+ is_proto_sup = false;
+
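+ /* A non-802.1Q (i.e. 802.1ad) protocol can only be configured when the
+ * firmware advertises VLAN protocol support.
+ */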
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ if (!is_proto_sup)
+ return -EOPNOTSUPP;
+ update |= NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO;
+ }
+
/* Write VLAN tag to VF entry in VF config symbol */
- vlan_tci = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
+ vlan_tag = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos);
+
+ /* A vlan_tag of 0 means the configuration should be cleared, in which
+ * case setting the TPID has no meaning when configuring the firmware.
+ */
+ if (vlan_tag && is_proto_sup)
+ vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto));
+
vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ;
- writew(vlan_tci, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
- return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_VLAN,
- "vlan");
+ return nfp_net_sriov_update(app, vf, update, "vlan");
}
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
@@ -209,7 +229,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
unsigned int vf_offset;
- u16 vlan_tci;
+ u32 vlan_tag;
u32 mac_hi;
u16 mac_lo;
u8 flags;
@@ -225,7 +245,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO);
flags = readb(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_CTRL);
- vlan_tci = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ vlan_tag = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vf;
@@ -233,9 +253,10 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
put_unaligned_be32(mac_hi, &ivi->mac[0]);
put_unaligned_be16(mac_lo, &ivi->mac[4]);
- ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tci);
- ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci);
-
+ ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
+ ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
+ if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"))
+ ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index 786be58a907e..7b72cc083476 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -19,6 +19,7 @@
#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
@@ -26,6 +27,7 @@
#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -43,6 +45,7 @@
#define NFP_NET_VF_CFG_LS_MODE_ENABLE 1
#define NFP_NET_VF_CFG_LS_MODE_DISABLE 2
#define NFP_NET_VF_CFG_VLAN 0x8
+#define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000
#define NFP_NET_VF_CFG_VLAN_QOS 0xe000
#define NFP_NET_VF_CFG_VLAN_VID 0x0fff
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 3d379e937184..ddb34bfb9bef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -13,22 +13,36 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/sizes.h>
+#include <linux/stringify.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
#endif
-#define nfp_err(cpp, fmt, args...) \
+#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x
+
+#define __nfp_err(cpp, fmt, args...) \
dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_warn(cpp, fmt, args...) \
+#define __nfp_warn(cpp, fmt, args...) \
dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_info(cpp, fmt, args...) \
+#define __nfp_info(cpp, fmt, args...) \
dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_dbg(cpp, fmt, args...) \
+#define __nfp_dbg(cpp, fmt, args...) \
dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define __nfp_printk(level, cpp, fmt, args...) \
+ dev_printk(level, nfp_cpp_device(cpp)->parent, \
+ NFP_SUBSYS ": " fmt, ## args)
+
+#define nfp_err(cpp, fmt, args...) \
+ __nfp_err(cpp, string_format(fmt), ## args)
+#define nfp_warn(cpp, fmt, args...) \
+ __nfp_warn(cpp, string_format(fmt), ## args)
+#define nfp_info(cpp, fmt, args...) \
+ __nfp_info(cpp, string_format(fmt), ## args)
+#define nfp_dbg(cpp, fmt, args...) \
+ __nfp_dbg(cpp, string_format(fmt), ## args)
#define nfp_printk(level, cpp, fmt, args...) \
- dev_printk(level, nfp_cpp_device(cpp)->parent, \
- NFP_SUBSYS ": " fmt, ## args)
+ __nfp_printk(level, cpp, string_format(fmt), ## args)
#define PCI_64BIT_BAR_COUNT 3
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 660013f716d4..5116badaf091 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -56,8 +56,8 @@
#include <asm/irq.h>
-#define TX_WORK_PER_LOOP 64
-#define RX_WORK_PER_LOOP 64
+#define TX_WORK_PER_LOOP NAPI_POLL_WEIGHT
+#define RX_WORK_PER_LOOP NAPI_POLL_WEIGHT
/*
* Hardware access:
@@ -5876,7 +5876,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
else
dev->netdev_ops = &nv_netdev_ops_optimized;
- netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
+ netif_napi_add(dev, &np->napi, nv_napi_poll, NAPI_POLL_WEIGHT);
dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 1dc40c537281..46da937ad27f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -32,8 +32,6 @@
#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
-#define PCH_GBE_TX_WEIGHT 64
-#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16
/* Initialize the wake-on-LAN settings */
@@ -1469,7 +1467,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
tx_desc->gbec_status, tx_desc->dma_status);
unused = PCH_GBE_DESC_UNUSED(tx_ring);
- thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
+ thresh = tx_ring->count - NAPI_POLL_WEIGHT;
if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
{ /* current marked clean, tx queue filling up, do extra clean */
int j, k;
@@ -1482,13 +1480,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
/* current marked clean, scan for more that need cleaning. */
k = i;
- for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
+ for (j = 0; j < NAPI_POLL_WEIGHT; j++)
{
tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/
if (++k >= tx_ring->count) k = 0; /*increment, wrap*/
}
- if (j < PCH_GBE_TX_WEIGHT) {
+ if (j < NAPI_POLL_WEIGHT) {
netdev_dbg(adapter->netdev,
"clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
unused, j, i, k, tx_ring->next_to_use,
@@ -1547,7 +1545,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
/* weight of a sort for tx, to avoid endless transmit cleanup */
- if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+ if (cleaned_count++ == NAPI_POLL_WEIGHT) {
cleaned = false;
break;
}
@@ -2519,7 +2517,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
netif_napi_add(netdev, &adapter->napi,
- pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
+ pch_gbe_napi_poll, NAPI_POLL_WEIGHT);
netdev->hw_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features = netdev->hw_features;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 0d9c2fe0245d..3d2098f21bb7 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -30,8 +30,7 @@ qed-$(CONFIG_QED_OOO) += qed_ooo.o
qed-$(CONFIG_QED_NVMETCP) += \
qed_nvmetcp.o \
- qed_nvmetcp_fw_funcs.o \
- qed_nvmetcp_ip_services.o
+ qed_nvmetcp_fw_funcs.o
qed-$(CONFIG_QED_RDMA) += \
qed_iwarp.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
deleted file mode 100644
index 96a2077fd315..000000000000
--- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-/*
- * Copyright 2021 Marvell. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/param.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/errno.h>
-
-#include <net/tcp.h>
-
-#include <linux/qed/qed_nvmetcp_ip_services_if.h>
-
-#define QED_IP_RESOL_TIMEOUT 4
-
-int qed_route_ipv4(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- __be32 *loc_ip, *rem_ip;
- struct rtable *rt;
- int rc = -ENXIO;
- int retry;
-
- loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr;
- rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr;
- *ndev = NULL;
- rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/);
- if (IS_ERR(rt)) {
- pr_err("lookup route failed\n");
- rc = PTR_ERR(rt);
- goto return_err;
- }
-
- neigh = dst_neigh_lookup(&rt->dst, rem_ip);
- if (!neigh) {
- rc = -ENOMEM;
- ip_rt_put(rt);
- goto return_err;
- }
-
- *ndev = rt->dst.dev;
- ip_rt_put(rt);
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- /* copy resolved MAC address */
- neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
- if (!(*loc_ip)) {
- *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE);
- local_addr->ss_family = AF_INET;
- }
-
-return_err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv4);
-
-int qed_route_ipv6(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- struct dst_entry *dst;
- struct flowi6 fl6;
- int rc = -ENXIO;
- int retry;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr;
- fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst || dst->error) {
- if (dst) {
- dst_release(dst);
- pr_err("lookup route failed %d\n", dst->error);
- }
-
- goto out;
- }
-
- neigh = dst_neigh_lookup(dst, &fl6.daddr);
- if (neigh) {
- *ndev = ip6_dst_idev(dst)->dev;
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- neigh_ha_snapshot((u8 *)hardware_address->sa_data,
- neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
-
- if (ipv6_addr_any(&fl6.saddr)) {
- if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev,
- &fl6.daddr, 0, &fl6.saddr)) {
- pr_err("Unable to find source IP address\n");
- goto out;
- }
-
- local_addr->ss_family = AF_INET6;
- ((struct sockaddr_in6 *)local_addr)->sin6_addr =
- fl6.saddr;
- }
- }
-
- dst_release(dst);
-
-out:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv6);
-
-void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id)
-{
- if (is_vlan_dev(*ndev)) {
- *vlan_id = vlan_dev_vlan_id(*ndev);
- *ndev = vlan_dev_real_dev(*ndev);
- }
-}
-EXPORT_SYMBOL(qed_vlan_get_ndev);
-
-struct pci_dev *qed_validate_ndev(struct net_device *ndev)
-{
- struct pci_dev *pdev = NULL;
- struct net_device *upper;
-
- for_each_pci_dev(pdev) {
- if (pdev && pdev->driver &&
- !strcmp(pdev->driver->name, "qede")) {
- upper = pci_get_drvdata(pdev);
- if (upper->ifindex == ndev->ifindex)
- return pdev;
- }
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(qed_validate_ndev);
-
-__be16 qed_get_in_port(struct sockaddr_storage *sa)
-{
- return sa->ss_family == AF_INET
- ? ((struct sockaddr_in *)sa)->sin_port
- : ((struct sockaddr_in6 *)sa)->sin6_port;
-}
-EXPORT_SYMBOL(qed_get_in_port);
-
-int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
- struct socket **sock, u16 *port)
-{
- struct sockaddr_storage sa;
- int rc = 0;
-
- rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
- sock);
- if (rc) {
- pr_warn("failed to create socket: %d\n", rc);
- goto err;
- }
-
- (*sock)->sk->sk_allocation = GFP_KERNEL;
- sk_set_memalloc((*sock)->sk);
-
- rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr,
- sizeof(local_ip_addr));
-
- if (rc) {
- pr_warn("failed to bind socket: %d\n", rc);
- goto err_sock;
- }
-
- rc = kernel_getsockname(*sock, (struct sockaddr *)&sa);
- if (rc < 0) {
- pr_warn("getsockname() failed: %d\n", rc);
- goto err_sock;
- }
-
- *port = ntohs(qed_get_in_port(&sa));
-
- return 0;
-
-err_sock:
- sock_release(*sock);
- sock = NULL;
-err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_fetch_tcp_port);
-
-void qed_return_tcp_port(struct socket *sock)
-{
- if (sock && sock->sk) {
- tcp_set_state(sock->sk, TCP_CLOSE);
- sock_release(sock);
- }
-}
-EXPORT_SYMBOL(qed_return_tcp_port);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 39176e765767..c9c8225f04d6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -496,19 +496,19 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
&edev->flags)) {
- DP_ERR(edev, "Timestamping in progress\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n");
edev->ptp_skip_txts++;
return;
}
if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
- DP_ERR(edev,
- "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Tx timestamping was not enabled, this pkt will not be timestamped\n");
clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
edev->ptp_skip_txts++;
} else if (unlikely(ptp->tx_skb)) {
- DP_ERR(edev,
- "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Device supports a single outstanding pkt to ts, It will not be ts\n");
clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
edev->ptp_skip_txts++;
} else {
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 63f0d2d0e87b..b202184eddd4 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -255,10 +255,6 @@ static inline void write_word_mode0(short ioaddr, unsigned short value)
#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
-/* Delay between EEPROM clock transitions. */
-#define eeprom_delay(ticks) \
-do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0)
-
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 97ce64079855..846fff16fa48 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -17,14 +17,14 @@ config NET_VENDOR_SOLARFLARE
if NET_VENDOR_SOLARFLARE
config SFC
- tristate "Solarflare SFC9000/SFC9100/EF100-family support"
+ tristate "Solarflare SFC9100/EF100-family support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
select CRC32
help
This driver supports 10/40-gigabit Ethernet cards based on
- the Solarflare SFC9000-family and SFC9100-family controllers.
+ the Solarflare SFC9100-family controllers.
It also supports 10/25/40/100-gigabit Ethernet cards based
on the Solarflare EF100 networking IP in Xilinx FPGAs.
@@ -47,11 +47,11 @@ config SFC_MCDI_MON
This exposes the on-board firmware-managed sensors as a
hardware monitor device.
config SFC_SRIOV
- bool "Solarflare SFC9000-family SR-IOV support"
+ bool "Solarflare SFC9000/SFC9100-family SR-IOV support"
depends on SFC && PCI_IOV
default y
help
- This enables support for the SFC9000 I/O Virtualization
+ This enables support for the Single Root I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
config SFC_MCDI_LOGGING
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8bd01c429f91..9b3374cf7937 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
sfc-y += efx.o efx_common.o efx_channels.o nic.o \
- farch.o siena.o ef10.o \
+ ef10.o \
tx.o tx_common.o tx_tso.o rx.o rx_common.o \
selftest.o ethtool.o ethtool_common.o ptp.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
@@ -8,7 +8,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50d535981a35..c9ee5011803f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2256,7 +2256,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
* guaranteed to satisfy the second as we only attempt TSO if
* inner_network_header <= 208.
*/
- ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
(tcp->doff << 2u) > ip_tot_len);
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index ffdb36715a49..173f0ecebc70 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2018 Solarflare Communications Inc.
- * Copyright 2019-2020 Xilinx Inc.
+ * Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -17,6 +17,7 @@
#include "io.h"
#include "ef100_nic.h"
#include "ef100_netdev.h"
+#include "ef100_sriov.h"
#include "ef100_regs.h"
#include "ef100.h"
@@ -436,6 +437,10 @@ static void ef100_pci_remove(struct pci_dev *pci_dev)
* blocks, so we have to do it before PCI removal.
*/
unregister_netdevice_notifier(&efx->netdev_notifier);
+#if defined(CONFIG_SFC_SRIOV)
+ if (!efx->type->is_vf)
+ efx_ef100_pci_sriov_disable(efx);
+#endif
ef100_remove(efx);
efx_fini_io(efx);
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
@@ -524,6 +529,23 @@ fail:
return rc;
}
+#ifdef CONFIG_SFC_SRIOV
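+/* PCI core sriov_configure callback: enables num_vfs VFs (or disables
+ * SR-IOV when num_vfs is 0) and returns the VF count on success.
+ */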
+static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct efx_nic *efx = pci_get_drvdata(dev);
+ int rc;
+
+ if (efx->type->sriov_configure) {
+ rc = efx->type->sriov_configure(efx, num_vfs);
+ if (rc)
+ return rc;
+ else
+ return num_vfs;
+ }
+ return -ENOENT;
+}
+#endif
+
/* PCI device ID table */
static const struct pci_device_id ef100_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */
@@ -538,6 +560,9 @@ struct pci_driver ef100_pci_driver = {
.id_table = ef100_pci_table,
.probe = ef100_pci_probe,
.remove = ef100_pci_remove,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = ef100_pci_sriov_configure,
+#endif
.err_handler = &efx_err_handlers,
};
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index a07cbf45a326..b04911bc8c57 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
- * Copyright 2019-2020 Xilinx Inc.
+ * Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -22,6 +22,7 @@
#include "mcdi_filters.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
+#include "ef100_sriov.h"
#include "ef100_netdev.h"
#include "rx_common.h"
@@ -787,6 +788,9 @@ const struct efx_nic_type ef100_pf_nic_type = {
.update_stats = ef100_update_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef100_sriov_configure,
+#endif
/* Per-type bar/size configuration not used on ef100. Location of
* registers is defined by extended capabilities.
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.c b/drivers/net/ethernet/sfc/ef100_sriov.c
new file mode 100644
index 000000000000..664578176bfe
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_sriov.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "ef100_sriov.h"
+#include "ef100_nic.h"
+
+static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ int rc;
+
+ efx->vf_count = num_vfs;
+ rc = pci_enable_sriov(dev, num_vfs);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n");
+ efx->vf_count = 0;
+ return rc;
+}
+
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ unsigned int vfs_assigned;
+
+ vfs_assigned = pci_vfs_assigned(dev);
+ if (vfs_assigned) {
+ netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+ "please detach them before disabling SR-IOV\n");
+ return -EBUSY;
+ }
+
+ pci_disable_sriov(dev);
+
+ return 0;
+}
+
+int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ if (num_vfs == 0)
+ return efx_ef100_pci_sriov_disable(efx);
+ else
+ return efx_ef100_pci_sriov_enable(efx, num_vfs);
+}
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.h b/drivers/net/ethernet/sfc/ef100_sriov.h
new file mode 100644
index 000000000000..c48fccd46c57
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_sriov.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include "net_driver.h"
+
+int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 302dc835ac3d..5e7fe75cb1d4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -795,10 +795,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
- .driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
- .driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
@@ -1294,12 +1290,6 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
-#ifdef CONFIG_SFC_SRIOV
- rc = efx_init_sriov();
- if (rc)
- goto err_sriov;
-#endif
-
rc = efx_create_reset_workqueue();
if (rc)
goto err_reset;
@@ -1319,10 +1309,6 @@ static int __init efx_init_module(void)
err_pci:
efx_destroy_reset_workqueue();
err_reset:
-#ifdef CONFIG_SFC_SRIOV
- efx_fini_sriov();
- err_sriov:
-#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -1335,9 +1321,6 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&ef100_pci_driver);
pci_unregister_driver(&efx_pci_driver);
efx_destroy_reset_workqueue();
-#ifdef CONFIG_SFC_SRIOV
- efx_fini_sriov();
-#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index daf0c00c1242..c05a83da9e44 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -28,7 +28,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct
ef100_enqueue_skb, __efx_enqueue_skb,
tx_queue, skb);
}
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 377df8b7f015..eec80b024195 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -51,28 +51,7 @@ MODULE_PARM_DESC(irq_adapt_high_thresh,
*/
static int napi_weight = 64;
-/***************
- * Housekeeping
- ***************/
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
+static const struct efx_channel_type efx_default_channel_type;
/*************
* INTERRUPTS
@@ -619,6 +598,7 @@ void efx_fini_channels(struct efx_nic *efx)
/* Allocate and initialise a channel structure, copying parameters
* (but not resources) from an old channel structure.
*/
+static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
struct efx_rx_queue *rx_queue;
@@ -696,7 +676,8 @@ fail:
return rc;
}
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+static void efx_get_channel_name(struct efx_channel *channel, char *buf,
+ size_t len)
{
struct efx_nic *efx = channel->efx;
const char *type;
@@ -1004,7 +985,7 @@ int efx_set_channels(struct efx_nic *efx)
return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}
-bool efx_default_channel_want_txqs(struct efx_channel *channel)
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
return channel->channel - channel->efx->tx_channel_offset <
channel->efx->n_tx_channels;
@@ -1362,3 +1343,26 @@ void efx_fini_napi(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
+
+/***************
+ * Housekeeping
+ ***************/
+
+static int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index d77ec1f77fb1..64abb99a56b8 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -32,16 +32,13 @@ void efx_fini_eventq(struct efx_channel *channel);
void efx_remove_eventq(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
int efx_init_channels(struct efx_nic *efx);
int efx_probe_channels(struct efx_nic *efx);
int efx_set_channels(struct efx_nic *efx);
-bool efx_default_channel_want_txqs(struct efx_channel *channel);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx);
-struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
@@ -50,7 +47,6 @@ void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
void efx_fini_napi(struct efx_nic *efx);
-int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);
#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index af37c990217e..f6577e74d6e6 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -51,8 +51,8 @@ static unsigned int efx_monitor_interval = 1 * HZ;
/* Default stats update time */
#define STATS_PERIOD_MS_DEFAULT 1000
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
+static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+static const char *const efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 0c6cc2191369..6bbdb5d2eebf 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -718,12 +718,14 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
struct ef4_rx_queue *rx_queue)
{
unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct iommu_domain __maybe_unused *domain;
/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
- if (iommu_present(&pci_bus_type))
+ domain = iommu_get_domain_for_dev(&efx->pci_dev->dev);
+ if (domain && domain->type != IOMMU_DOMAIN_IDENTITY)
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
else
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 148dcd48b58d..9599123bc28d 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -16,6 +16,7 @@
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index d3fcbf930dba..ff617b1b38d3 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -73,8 +73,8 @@
* \------------------------------ Resync (always set)
*
* The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writting
- * back into shared memory, or by writting out an event.
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c75dc75e2857..318db906a154 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -612,11 +612,6 @@ extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
-extern const char *const efx_reset_type_names[];
-extern const unsigned int efx_reset_type_max;
-#define RESET_TYPE(type) \
- STRING_TABLE_LOOKUP(type, efx_reset_type)
-
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 5c2fe3ce3f4d..251868235ae4 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -301,10 +301,6 @@ struct efx_ef10_nic_data {
int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
-int efx_init_sriov(void);
-void efx_fini_sriov(void);
-
-extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
new file mode 100644
index 000000000000..89a7fd47b057
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -0,0 +1,17204 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+
+#ifndef MCDI_PCOL_H
+#define MCDI_PCOL_H
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#define MCDI_PCOL_VERSION 2
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request, a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
+
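The LBN/WIDTH pairs above give bit offsets within the 32-bit header dword. As a minimal illustrative sketch (the helper name is not part of this header, and kernel types from <linux/types.h> are assumed), a request header could be packed like this:

/* Illustrative sketch only, not part of the original header: pack an
 * MCDI v1 request header from the LBN fields above. Callers must keep
 * each value within its documented WIDTH.
 */
static inline u32 mcdi_pack_header_example(u32 code, u32 datalen, u32 seq,
					   u32 xflags)
{
	return (code    << MCDI_HEADER_CODE_LBN)    |
	       (1U      << MCDI_HEADER_RESYNC_LBN)  | /* Resync is always set */
	       (datalen << MCDI_HEADER_DATALEN_LBN) |
	       (seq     << MCDI_HEADER_SEQ_LBN)     |
	       (xflags  << MCDI_HEADER_XFLAGS_LBN);
}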
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
+
+/* The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header. An event can be distinguished from an MCDI response by
+ * examining the first byte which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
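A small sketch of the check described above, distinguishing an MCDI response event by its event code field (bits 60-63); the helper name is hypothetical:

/* Illustrative sketch only: returns true if a raw 64-bit event word
 * carries an MCDI response, i.e. its event code (bits 60-63) is 0xc.
 */
static inline bool ev_is_mcdi_response_example(u64 ev)
{
	return ((ev >> 60) & 0xf) == FSE_AZ_EV_CODE_MCDI_EVRESPONSE;
}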
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+ only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807.
+ * May also be returned for other operations such as sub-variant switching. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+#define MC_CMD_ERR_CODE_OFST 0
+
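Drivers normally hand callers negative Linux errno values rather than these raw positive codes. A hedged sketch of such a translation follows; the function name and the -EPROTO catch-all are editorial assumptions, not necessarily what the sfc driver does:

/* Illustrative sketch only: translate a subset of MC_CMD_ERR_* values to
 * negative Linux errnos. Vendor-specific codes (0x1000 and above) are
 * collapsed to -EPROTO here purely as an assumption.
 */
static inline int mcdi_err_to_errno_example(unsigned int mc_err)
{
	switch (mc_err) {
	case 0:				return 0;
	case MC_CMD_ERR_EPERM:		return -EPERM;
	case MC_CMD_ERR_ENOENT:		return -ENOENT;
	case MC_CMD_ERR_EINTR:		return -EINTR;
	case MC_CMD_ERR_EAGAIN:		return -EAGAIN;
	case MC_CMD_ERR_EINVAL:		return -EINVAL;
	case MC_CMD_ERR_ENOSYS:		return -ENOSYS;
	default:			return -EPROTO;
	}
}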
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to noflash mode entry point. */
+#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+
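The initializer above appears to lay out one supported-command bit per command number across four dwords. A sketch of testing a command bit in such a mask (the helper name and the 128-command bound are assumptions drawn from that layout):

/* Illustrative sketch only: test whether command number 'cmd' is set in a
 * 4-dword mask laid out like MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS.
 */
static inline bool mc_cmd_in_mask_example(const u32 mask[4], unsigned int cmd)
{
	return cmd < 128 && (mask[cmd / 32] & (1U << (cmd % 32)));
}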
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
+#define MCDI_EVENT_CMDDONE_SEQ_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_OFST 0
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_OFST 0
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_OFST 0
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
+#define MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_OFST 0
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_OFST 0
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_OFST 0
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_OFST 0
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_OFST 0
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_OFST 0
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_OFST 0
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_OFST 0
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_OFST 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_OFST 0
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_OFST 0
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
+#define MCDI_EVENT_AOE_ERR_DATA_OFST 0
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_OFST 0
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_OFST 0
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_OFST 0
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 0
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_OFST 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_OFST 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_OFST 0
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_OFST 0
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_OFST 0
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_OFST 0
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_OFST 0
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
+#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24
+#define MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24
+#define MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4
+/* Enum values, see field(s): */
+/* MCDI_EVENT/LINKCHANGE_SPEED */
+#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28
+#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1
+#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29
+#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0
+#define MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0
+#define MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30
+#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
+#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
+#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+/* Alias for PTP_DATA. */
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+/* Data associated with PTP events which doesn't fit into the main DATA field
+ */
+#define MCDI_EVENT_PTP_DATA_LBN 36
+#define MCDI_EVENT_PTP_DATA_WIDTH 8
+/* EF100 specific. Defined by QDMA. The phase bit, changes each time round the
+ * event ring
+ */
+#define MCDI_EVENT_EV_EVQ_PHASE_LBN 59
+#define MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
+/* enum: Link change. This event is sent instead of LINKCHANGE if
+ * WANT_V2_LINKCHANGES was set on driver attach.
+ */
+#define MCDI_EVENT_CODE_LINKCHANGE_V2 0x20
+/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach
+ * when the local device capabilities changes. This will usually correspond to
+ * a module change.
+ */
+#define MCDI_EVENT_CODE_MODULECHANGE 0x21
+/* enum: Notification that the sensors have been added and/or removed from the
+ * sensor table. This event includes the new sensor table generation count; if
+ * this does not match the driver's local copy it is expected to call
+ * DYNAMIC_SENSORS_LIST to refresh it.
+ */
+#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22
+/* enum: Notification that a sensor has changed state as a result of a reading
+ * crossing a threshold. This is sent as two events, the first event contains
+ * the handle and the sensor's state (in the SRC field), and the second
+ * contains the value.
+ */
+#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23
+/* enum: Notification that a descriptor proxy function configuration has been
+ * pushed to "live" status (visible to host). SRC field contains the handle of
+ * the affected descriptor proxy function. DATA field contains the generation
+ * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET /
+ * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24
+/* enum: Notification that a descriptor proxy function has been reset. SRC
+ * field contains the handle of the affected descriptor proxy function. See
+ * SF-122927-TC for details.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25
+/* enum: Notification that a driver attached to a descriptor proxy function.
+ * SRC field contains the handle of the affected descriptor proxy function. For
+ * Virtio proxy functions this message consists of two MCDI events, where the
+ * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0
+ * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy
+ * functions event length and meaning of DATA field is not yet defined. See
+ * SF-122927-TC for details.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32
+#define MCDI_EVENT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32
+/* The new generation count after a sensor has been added or deleted. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32
+/* The handle of a dynamic sensor. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32
+/* The current values of a sensor. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32
+/* The current state of a sensor. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36
+#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8
+#define MCDI_EVENT_DESC_PROXY_DATA_OFST 0
+#define MCDI_EVENT_DESC_PROXY_DATA_LEN 4
+#define MCDI_EVENT_DESC_PROXY_DATA_LBN 0
+#define MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32
+/* Generation count of applied configuration set */
+#define MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0
+#define MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4
+#define MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0
+#define MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32
+/* Virtio features negotiated with the host driver. First event (CONT=1)
+ * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63.
+ */
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
+
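As one worked example of using the MCDI_EVENT fields above, a LINKCHANGE event's speed nibble could be mapped to Mb/s roughly as follows; the helper name and the Mb/s mapping are illustrative only:

/* Illustrative sketch only: extract the LINKCHANGE speed field from the
 * low 32 bits of an MCDI event and map it to Mb/s; 0 means link down or
 * unknown speed.
 */
static inline unsigned int mcdi_linkchange_speed_mbps_example(u32 ev_data)
{
	u32 speed = (ev_data >> MCDI_EVENT_LINKCHANGE_SPEED_LBN) & 0xf;

	switch (speed) {
	case MCDI_EVENT_LINKCHANGE_SPEED_100M:	return 100;
	case MCDI_EVENT_LINKCHANGE_SPEED_1G:	return 1000;
	case MCDI_EVENT_LINKCHANGE_SPEED_10G:	return 10000;
	case MCDI_EVENT_LINKCHANGE_SPEED_25G:	return 25000;
	case MCDI_EVENT_LINKCHANGE_SPEED_40G:	return 40000;
	case MCDI_EVENT_LINKCHANGE_SPEED_50G:	return 50000;
	case MCDI_EVENT_LINKCHANGE_SPEED_100G:	return 100000;
	default:				return 0;
	}
}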
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+
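A small sketch of sizing the variable-length PPS event above; treating anything shorter than LENMIN as empty and clamping to the MCDI v2 maximum are editorial choices, and min_t is the kernel helper:

/* Illustrative sketch only: number of 8-byte timestamp records following
 * the 8-byte header of an extended PPS event of 'len' bytes.
 */
static inline unsigned int fcdi_pps_num_timestamps_example(size_t len)
{
	if (len < FCDI_EXTENDED_EVENT_PPS_LENMIN)
		return 0;
	return min_t(unsigned int, FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len),
		     FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2);
}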
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
+#define MUM_EVENT_SENSOR_ID_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_OFST 0
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_OFST 0
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
+
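As a sketch, the fixed 8-byte request payload for MC_CMD_READ32 could be filled like this; the helper name is hypothetical and little-endian field encoding is assumed:

/* Illustrative sketch only: fill an MC_CMD_READ32 request. 'buf' must be
 * at least MC_CMD_READ32_IN_LEN bytes; the expected response is
 * MC_CMD_READ32_OUT_LEN(numwords) bytes, i.e. 4 bytes per word.
 */
static inline void mc_cmd_read32_build_example(u8 *buf, u32 addr, u32 numwords)
{
	put_unaligned_le32(addr, buf + MC_CMD_READ32_IN_ADDR_OFST);
	put_unaligned_le32(numwords, buf + MC_CMD_READ32_IN_NUMWORDS_OFST);
}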
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4)
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
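A sketch of composing the magic SRC_ADDR from the BOOT_MAGIC_* bit positions documented above; the helper name and the choice of optional flags are illustrative, not a statement of how firmware or the driver builds this value:

/* Illustrative sketch only: build a COPYCODE source address with
 * BOOT_MAGIC_PRESENT set plus optional boot flags from the bitfield
 * definitions above.
 */
static inline u32 mc_copycode_boot_magic_example(bool sats_not_loaded,
						 bool ignore_config)
{
	u32 addr = 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN;

	if (sats_not_loaded)
		addr |= 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN;
	if (ignore_config)
		addr |= 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN;
	return addr;
}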
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* Instruction address from which the MC booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
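+
+/* Illustrative sketch (not part of the MCDI protocol definitions): extracting
+ * the WATCHDOG flag from the FLAGS word of MC_CMD_GET_BOOT_STATUS_OUT using
+ * the _OFST/_LBN/_WIDTH convention. The example_* name is hypothetical;
+ * get_unaligned_le32() and kernel types are assumed, and "resp" points at the
+ * little-endian MCDI response payload.
+ */
+static inline bool example_boot_status_watchdog(const u8 *resp)
+{
+	u32 flags = get_unaligned_le32(resp +
+				       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST);
+	u32 mask = (1U << MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH) - 1;
+
+	return (flags >> MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN) & mask;
+}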
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
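+
+/* Illustrative sketch (not part of the MCDI protocol definitions):
+ * GP_REGS_OFFS is an array field, i.e. _NUM consecutive elements of _LEN
+ * bytes starting at _OFST. Reading saved register "idx" from a
+ * MC_CMD_GET_ASSERTS_OUT payload; a value of MC_CMD_GET_ASSERTS_REG_NO_DATA
+ * means the register contents were likely lost. The example_* name is
+ * hypothetical; get_unaligned_le32() and kernel types are assumed.
+ */
+static inline u32 example_get_asserts_gp_reg(const u8 *resp, unsigned int idx)
+{
+	/* idx must be < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM */
+	return get_unaligned_le32(resp +
+				  MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST +
+				  idx * MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN);
+}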
+
+/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs
+ * found on Riverhead designs
+ */
+#define MC_CMD_GET_ASSERTS_OUT_V2_LEN 240
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (huntington and later) */
+/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26
+
+/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted
+ * firmware version information
+ */
+#define MC_CMD_GET_ASSERTS_OUT_V3_LEN 360
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (huntington and later) */
+/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20
+/* MC firmware build date (as Unix timestamp) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264
+/* MC firmware version number */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272
+/* MC firmware security level */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4
+/* MC firmware extra version info (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about adapter components.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
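+
+/* Illustrative sketch (not part of the MCDI protocol definitions): 64-bit
+ * MCDI fields such as VERSION are carried as two little-endian dwords at
+ * _LO_OFST and _HI_OFST. Reassembling the value from a MC_CMD_GET_VERSION_OUT
+ * payload; it is often presented as four 16-bit version components. The
+ * example_* name is hypothetical; get_unaligned_le32() and kernel types are
+ * assumed.
+ */
+static inline u64 example_get_version(const u8 *resp)
+{
+	u64 lo = get_unaligned_le32(resp +
+				    MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST);
+	u64 hi = get_unaligned_le32(resp +
+				    MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST);
+
+	return lo | (hi << 32);
+}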
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
+
+/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version
+ * information for all adapter components. For Riverhead based designs, base MC
+ * firmware version fields refer to NMC firmware, while CMC firmware data is in
+ * dedicated CMC fields. Flags indicate which data is present in the response
+ * (depending on which components exist on a particular adapter)
+ */
+#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ *   FPGA_VERSION[0]: x => Image H{x}
+ *   FPGA_VERSION[1]: Revision letter (0 => A, 1 => B, ...)
+ *   FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64
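+
+/* Illustrative sketch (not part of the MCDI protocol definitions): the V2
+ * extended fields are only meaningful when the matching *_EXT_INFO_PRESENT
+ * flag is set, so consumers should test FLAGS before reading them. The
+ * example_* name is hypothetical; get_unaligned_le32() and kernel types are
+ * assumed.
+ */
+static inline bool example_v2_has_board_info(const u8 *resp)
+{
+	u32 flags = get_unaligned_le32(resp +
+				       MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST);
+	u32 lbn = MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN;
+	u32 width = MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH;
+
+	return (flags >> lbn) & ((1U << width) - 1);
+}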
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is only accurate to the nearest MHz (e.g. 666000000 may be returned for
+ * 666666666).
+ */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change the frequency correction applied to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. Not implemented. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC. Siena PTP adapters only.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Values above this are reserved for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1)
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008
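+
+/* Illustrative sketch (not part of the MCDI protocol definitions): building a
+ * MC_CMD_PTP_IN_TRANSMIT request. The common CMD/PERIPH_ID header is followed
+ * by the packet length and the raw packet bytes; the MCDI request length
+ * comes from the _LEN(num) helper. The example_* name is hypothetical; a
+ * PERIPH_ID of 0 is assumed here, as are put_unaligned_le32(), memcpy() and
+ * kernel types.
+ */
+static inline size_t example_fill_ptp_transmit(u8 *req, const u8 *pkt,
+						u32 pkt_len)
+{
+	/* pkt_len must lie within PACKET_MINNUM..PACKET_MAXNUM */
+	put_unaligned_le32(MC_CMD_PTP_OP_TRANSMIT,
+			   req + MC_CMD_PTP_IN_CMD_OFST);
+	put_unaligned_le32(0, req + MC_CMD_PTP_IN_PERIPH_ID_OFST);
+	put_unaligned_le32(pkt_len, req + MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST);
+	memcpy(req + MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST, pkt, pkt_len);
+	return MC_CMD_PTP_IN_TRANSMIT_LEN(pkt_len);	/* bytes */
+}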
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1)
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Not used. Events are always sent to function relative queue 0. */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
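+
+/* Illustrative sketch (not part of the MCDI protocol definitions): the QUEUE
+ * word carries the event queue ID in its low 16 bits and the
+ * REPORT_SYNC_STATUS flag in bit 31, packed with the usual _LBN/_WIDTH
+ * convention. The example_* name is hypothetical; put_unaligned_le32() and
+ * kernel types are assumed.
+ */
+static inline void example_fill_time_event_subscribe_queue(u8 *req, u16 evq,
+							    bool report_sync)
+{
+	u32 queue = (u32)evq <<
+		    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN;
+
+	if (report_sync)
+		queue |= 1U <<
+			 MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN;
+	put_unaligned_le32(queue,
+			   req + MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST);
+}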
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20)
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
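+
+/* Illustrative sketch (not part of the MCDI protocol definitions): the
+ * SYNCHRONIZE response is an array of TIMESET_LEN-byte records,
+ * TIMESET_NUM(len) gives the record count for a response of "len" bytes, and
+ * the HOSTSTART/MAJOR/MINOR/HOSTEND/WAITNS offsets apply within each record.
+ * The example_* names are hypothetical; get_unaligned_le32() and kernel types
+ * are assumed.
+ */
+struct example_ptp_timeset {
+	u32 host_start;	/* host time before the NIC clock read */
+	u32 major;	/* NIC timestamp major value */
+	u32 minor;	/* NIC timestamp minor value */
+	u32 host_end;	/* host time after the NIC clock read */
+	u32 wait_ns;	/* ns the MC waited after reading its clock */
+};
+
+static inline void example_parse_timeset(const u8 *resp, unsigned int idx,
+					 struct example_ptp_timeset *ts)
+{
+	const u8 *rec = resp + MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST +
+			idx * MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+
+	ts->host_start =
+		get_unaligned_le32(rec + MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST);
+	ts->major =
+		get_unaligned_le32(rec + MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST);
+	ts->minor =
+		get_unaligned_le32(rec + MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST);
+	ts->host_end =
+		get_unaligned_le32(rec + MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST);
+	ts->wait_ns =
+		get_unaligned_le32(rec + MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST);
+}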
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event nS reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1)
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed. Note this enum is deprecated. Do not add to it; use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
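+
+/* Illustrative sketch (not part of the MCDI protocol definitions): converting
+ * a timestamp "minor" value to nanoseconds for each format above, following
+ * the per-format tick sizes given in the enum comments. A 64-bit intermediate
+ * avoids overflow in the 2^-27 s case. The example_* name is hypothetical;
+ * kernel u32/u64 types are assumed.
+ */
+static inline u32 example_minor_to_ns(u32 format, u32 minor)
+{
+	switch (format) {
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS:
+		return minor;			/* already in ns */
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS:
+		return minor * 8;		/* 8 ns per tick */
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION:
+		/* 2^-27 s per tick */
+		return (u32)(((u64)minor * 1000000000ULL) >> 27);
+	default:
+		return 0;			/* unknown format */
+	}
+}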
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
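+
+/* Illustrative sketch (not part of the MCDI protocol definitions): per the
+ * SYNC_WINDOW_MIN description above, the corrected synchronization window of
+ * one SYNCHRONIZE sample is (HOSTEND - HOSTSTART) - WAITNS, and a sample is
+ * acceptable when that window is at least SYNC_WINDOW_MIN. The example_* name
+ * is hypothetical; the host times are assumed to come from the same
+ * monotonically increasing source, and kernel types are assumed.
+ */
+static inline bool example_sync_sample_ok(u32 host_start, u32 host_end,
+					  u32 wait_ns, u32 sync_window_min)
+{
+	u32 window = (host_end - host_start) - wait_ns;	/* corrected window */
+
+	return window >= sync_window_min;
+}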
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
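+
+/* Illustrative sketch (not part of the MCDI protocol definitions): per the
+ * BUFFER comment above, the last dword of a MC_CMD_CSR_READ32 response is a
+ * status word, so a response of "resp_len" bytes carries
+ * BUFFER_NUM(resp_len) - 1 data words followed by the status. The example_*
+ * name is hypothetical; get_unaligned_le32() and kernel types are assumed.
+ */
+static inline u32 example_csr_read32_status(const u8 *resp, size_t resp_len)
+{
+	unsigned int num = MC_CMD_CSR_READ32_OUT_BUFFER_NUM(resp_len);
+
+	return get_unaligned_le32(resp +
+				  MC_CMD_CSR_READ32_OUT_BUFFER_OFST +
+				  (num - 1) * MC_CMD_CSR_READ32_OUT_BUFFER_LEN);
+}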
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD
+ * at the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: report OCSD current
+ * state / 2: (debug) show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address of the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
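+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * querying the current OCSD state as described above -- ADDR is left NULL (0)
+ * so that INTERVAL is interpreted as the "report current state" command (1).
+ * Assumes the sfc MCDI helpers from "mcdi.h"; the function name is
+ * hypothetical.
+ */
+static int example_hp_ocsd_report_state(struct efx_nic *efx, u32 *status)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_HP_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_HP_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, HP_IN_SUBCMD, MC_CMD_HP_IN_OCSD_SUBCMD);
+	MCDI_SET_QWORD(inbuf, HP_IN_OCSD_ADDR, 0);	/* NULL: INTERVAL is a command */
+	MCDI_SET_DWORD(inbuf, HP_IN_OCSD_INTERVAL, 1);	/* 1: report current state */
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_HP, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_HP_OUT_LEN)
+		return -EIO;
+	*status = MCDI_DWORD(outbuf, HP_OUT_OCSD_STATUS); /* e.g. OCSD_STOPPED */
+	return 0;
+}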
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
+/* Device address (clause 45), or MC_CMD_MDIO_CLAUSE22 for clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
+/* enum: By default all MCDI MDIO operations operate in clause 45 mode. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
+
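+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * a clause 45 MDIO register read that checks the raw status word described
+ * above; for clause 22, pass MC_CMD_MDIO_CLAUSE22 as devad. Assumes the sfc
+ * MCDI helpers from "mcdi.h"; the function name is hypothetical.
+ */
+static int example_mdio_read(struct efx_nic *efx, u32 bus, u32 prtad,
+			     u32 devad, u32 addr, u32 *value)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	/* A good transaction has only the DONE bit set in STATUS */
+	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+	*value = MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
+	return 0;
+}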
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
+/* Device address (clause 45), or MC_CMD_MDIO_CLAUSE22 for clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
+/* enum: By default all MCDI MDIO operations operate in clause 45 mode. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2)
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32
+
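+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * fetching the Siena port 0 base MAC address from the board configuration.
+ * Assumes the sfc MCDI helpers from "mcdi.h" and ether_addr_copy() from
+ * <linux/etherdevice.h>; the function name is hypothetical.
+ */
+static int example_get_port0_mac_base(struct efx_nic *efx, u8 *mac)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+	size_t outlen;
+	int rc;
+
+	/* The request has no payload (MC_CMD_GET_BOARD_CFG_IN_LEN is 0) */
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN)
+		return -EIO;
+	ether_addr_copy(mac,
+			MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
+	return 0;
+}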
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters
+ * (offset 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16-byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
+#define MC_CMD_DRV_ATTACH_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_OFST 0
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4
+#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Prefer to use firmware with additional DPDK support */
+#define MC_CMD_FW_DPDK 0x6
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+#define MC_CMD_FW_L3XUDP 0x7
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
+
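+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * attaching the driver with UPDATE=1 and no datapath firmware preference
+ * (MC_CMD_FW_DONT_CARE), returning the previous state. Assumes the sfc MCDI
+ * helpers from "mcdi.h"; the function name is hypothetical.
+ */
+static int example_drv_attach(struct efx_nic *efx, bool attach, u32 *old_state)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	/* NEW_STATE bit 0 is ATTACH; the WANT_* bits are left clear here */
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
+		return -EIO;
+	*old_state = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
+	return 0;
+}
+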
+/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver
+ * version
+ */
+#define MC_CMD_DRV_ATTACH_IN_V2_LEN 32
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4
+/* MC_CMD_DRV_ATTACH_OFST 0 */
+/* MC_CMD_DRV_ATTACH_LBN 0 */
+/* MC_CMD_DRV_ATTACH_WIDTH 1 */
+#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1
+/* MC_CMD_DRV_PREBOOT_OFST 0 */
+/* MC_CMD_DRV_PREBOOT_LBN 1 */
+/* MC_CMD_DRV_PREBOOT_WIDTH 1 */
+#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1
+#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2
+#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+/* MC_CMD_FW_FULL_FEATURED 0x0 */
+/* enum: Prefer to use firmware with fewer features but lower latency */
+/* MC_CMD_FW_LOW_LATENCY 0x1 */
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+/* MC_CMD_FW_PACKED_STREAM 0x2 */
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+/* MC_CMD_FW_HIGH_TX_RATE 0x3 */
+/* enum: Reserved value */
+/* MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+/* MC_CMD_FW_RULES_ENGINE 0x5 */
+/* enum: Prefer to use firmware with additional DPDK support */
+/* MC_CMD_FW_DPDK 0x6 */
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+/* MC_CMD_FW_L3XUDP 0x7 */
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+/* MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */
+/* enum: Only this option is allowed for non-admin functions */
+/* MC_CMD_FW_DONT_CARE 0xffffffff */
+/* Version of the driver to be reported by management protocols (e.g. NC-SI)
+ * handled by the NIC. This is a zero-terminated ASCII string.
+ */
+#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12
+#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+/* enum: If set, indicates that VI spreading is currently enabled. Will always
+ * indicate the current state, regardless of the value in the WANT_VI_SPREADING
+ * input.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4
+/* enum: Used during development only. Should no longer be used. */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5
+/* enum: If set, indicates that TX only spreading is enabled. Even-numbered
+ * TXQs will use one engine, and odd-numbered TXQs will use the other. This
+ * also has the effect that only even-numbered RXQs will receive traffic.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1)
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
+#define MC_CMD_PUTS_IN_UART_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_OFST 0
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+#define MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This is guaranteed to succeed even if the PHY is
+ * in a 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
+#define MC_CMD_PHY_CAP_10HDX_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_OFST 8
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_OFST 8
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_OFST 8
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_OFST 8
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_OFST 8
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_OFST 8
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_OFST 8
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_OFST 8
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_OFST 8
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_OFST 8
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_OFST 8
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_OFST 8
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_OFST 8
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_OFST 8
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_OFST 8
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_OFST 8
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
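+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * testing an individual capability bit (here autonegotiation) in the
+ * SUPPORTED_CAP mask returned by MC_CMD_GET_PHY_CFG. Assumes the sfc MCDI
+ * helpers from "mcdi.h"; the function name is hypothetical.
+ */
+static int example_phy_supports_an(struct efx_nic *efx, bool *supported)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+	size_t outlen;
+	u32 caps;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN)
+		return -EIO;
+	caps = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+	*supported = !!(caps & (1 << MC_CMD_PHY_CAP_AN_LBN));
+	return 0;
+}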
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
+
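+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * starting a BIST and polling for completion as described above, consuming
+ * only the generic RESULT word. Assumes the sfc MCDI helpers from "mcdi.h"
+ * and msleep() from <linux/delay.h>; the function name and the 100-iteration
+ * timeout are illustrative assumptions.
+ */
+static int example_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+	size_t outlen;
+	int rc, retry;
+
+	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc)
+		return rc;
+
+	for (retry = 0; retry < 100; retry++) {
+		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+				  outbuf, sizeof(outbuf), &outlen);
+		if (rc)
+			return rc;
+		if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+			return -EIO;
+		if (MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT) !=
+		    MC_CMD_POLL_BIST_RUNNING)
+			break;
+		msleep(10);
+	}
+	return MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT) ==
+	       MC_CMD_POLL_BIST_PASSED ? 0 : -EIO;
+}
+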
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
+/* Status of channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than by writing
+ * FLUSH_CMD directly.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
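+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * requesting a flush of a list of RX queues; completion is still signalled by
+ * the usual flush done/failure events. Assumes the sfc MCDI helpers from
+ * "mcdi.h" (including MCDI_SET_ARRAY_DWORD); the function name is
+ * hypothetical.
+ */
+static int example_flush_rx_queues(struct efx_nic *efx, const u32 *qids,
+				   unsigned int count)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
+	unsigned int i;
+
+	if (count < MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM ||
+	    count > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM)
+		return -EINVAL;
+	for (i = 0; i < count; i++)
+		MCDI_SET_ARRAY_DWORD(inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+				     i, qids[i]);
+	return efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+			    MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count),
+			    NULL, 0, NULL);
+}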
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
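+/* Editor's illustrative sketch (not part of the generated MCDI definitions):
+ * reading the 64-bit mask of loopback modes supported at 10G and testing a
+ * single mode; each bit position corresponds to an MC_CMD_LOOPBACK_* enum
+ * value. Assumes the sfc MCDI helpers from "mcdi.h" (including MCDI_QWORD);
+ * the function name is hypothetical.
+ */
+static int example_loopback_supported_10g(struct efx_nic *efx,
+					  unsigned int mode, bool *supported)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+	size_t outlen;
+	u64 modes;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)
+		return -EIO;
+	modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_10G);
+	*supported = !!(modes & (1ULL << mode));
+	return 0;
+}
+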
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
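+
+/* Illustrative sketch only, not part of the MCDI definition: each per-speed
+ * field above is a 64-bit value split into LO/HI dwords and is treated by
+ * drivers as a mask with one bit per MC_CMD_LOOPBACK_* enum value. A caller
+ * with a raw little-endian response buffer might test a mode roughly as
+ * below; the helper name is hypothetical and assumes the usual kernel types
+ * plus the get_unaligned_le32() accessor.
+ */
+#if 0 /* example only */
+static bool loopback_supported_25g(const u8 *resp, unsigned int mode)
+{
+	u64 lo, hi;
+
+	lo = get_unaligned_le32(resp +
+			MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST);
+	hi = get_unaligned_le32(resp +
+			MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST);
+	/* e.g. mode == MC_CMD_LOOPBACK_PMAPMD */
+	return ((hi << 32) | lo) & (1ULL << mode);
+}
+#endif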
+
+/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */
+#define AN_TYPE_LEN 4
+#define AN_TYPE_TYPE_OFST 0
+#define AN_TYPE_TYPE_LEN 4
+/* enum: None, AN disabled or not supported */
+#define MC_CMD_AN_NONE 0x0
+/* enum: Clause 28 - BASE-T */
+#define MC_CMD_AN_CLAUSE28 0x1
+/* enum: Clause 37 - BASE-X */
+#define MC_CMD_AN_CLAUSE37 0x2
+/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable
+ * assemblies. Includes Clause 72/Clause 92 link-training.
+ */
+#define MC_CMD_AN_CLAUSE73 0x3
+#define AN_TYPE_TYPE_LBN 0
+#define AN_TYPE_TYPE_WIDTH 32
+
+/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3
+ */
+#define FEC_TYPE_LEN 4
+#define FEC_TYPE_TYPE_OFST 0
+#define FEC_TYPE_TYPE_LEN 4
+/* enum: No FEC */
+#define MC_CMD_FEC_NONE 0x0
+/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+#define MC_CMD_FEC_BASER 0x1
+/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+#define MC_CMD_FEC_RS 0x2
+#define FEC_TYPE_TYPE_LBN 0
+#define FEC_TYPE_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
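+
+/* Illustrative sketch only, not part of the MCDI definition: the _OFST/_LBN/
+ * _WIDTH triplets above describe bit fields within the 32-bit FLAGS dword, so
+ * a caller decoding a raw little-endian MC_CMD_GET_LINK response could
+ * extract a flag roughly as below (for sub-32-bit fields). The helper name is
+ * hypothetical; real drivers normally use their own MCDI field accessors.
+ */
+#if 0 /* example only */
+static u32 get_link_flag(const u8 *resp, unsigned int lbn, unsigned int width)
+{
+	u32 flags = get_unaligned_le32(resp + MC_CMD_GET_LINK_OUT_FLAGS_OFST);
+
+	return (flags >> lbn) & ((1u << width) - 1);
+}
+/* Link is up when
+ * get_link_flag(resp, MC_CMD_GET_LINK_OUT_LINK_UP_LBN,
+ *		 MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH) is non-zero.
+ */
+#endif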
+
+/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */
+#define MC_CMD_GET_LINK_OUT_V2_LEN 44
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
+/* True local device capabilities (taking into account currently used PMD/MDI,
+ * e.g. plugged-in module). In general, this is a subset of
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST
+ * bits if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal
+ * to SUPPORTED_CAP for non-pluggable PMDs. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4
+/* Auto-negotiation type used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Forward error correction used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported and means "choose any available
+ * speed".
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
+
+/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence
+ * number to ensure this SET_LINK command corresponds to the latest
+ * MODULECHANGE event.
+ */
+#define MC_CMD_SET_LINK_IN_V2_LEN 17
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V2_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V2_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported and means "choose any available
+ * speed".
+ */
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
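+
+/* Illustrative sketch only, not part of the MCDI definition: MODULE_SEQ is a
+ * single byte in which SEQ_NUMBER (bits 0..6) is intended to carry the
+ * sequence number from the latest MODULECHANGE event (per the structure
+ * comment above), while SEQ_IGNORE (bit 7) presumably asks the MC to skip the
+ * sequence check. A caller might pack it roughly as below; the helper name is
+ * hypothetical.
+ */
+#if 0 /* example only */
+static void set_link_v2_module_seq(u8 *req, u8 seq, bool ignore)
+{
+	u8 num_mask = (1u << MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH) - 1;
+
+	req[MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST] =
+		(seq & num_mask) |
+		(ignore ? 1u << MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN : 0);
+}
+#endif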
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
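+
+/* Illustrative sketch only, not part of the MCDI definition: with the
+ * SET_MAC_ENHANCED capability, the CONTROL word of MC_CMD_SET_MAC_EXT_IN
+ * selects which parameters are applied, so a caller could update just the
+ * MTU roughly as below; passing CONTROL == 0 performs no changes while still
+ * returning the current MTU in MC_CMD_SET_MAC_V2_OUT/MTU. The helper name is
+ * hypothetical and assumes the put_unaligned_le32() accessor.
+ */
+#if 0 /* example only */
+static void set_mac_ext_mtu_only(u8 *req, u32 mtu)
+{
+	/* Other request fields are ignored because their CFG_* bits are 0. */
+	put_unaligned_le32(mtu, req + MC_CMD_SET_MAC_EXT_IN_MTU_OFST);
+	put_unaligned_le32(1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN,
+			   req + MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST);
+}
+#endif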
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration below). Each value is
+ * represented by a 32-bit number. If DMA_ADDR is 0, then no DMA is performed
+ * and the statistics may be read from the message response. If DMA_ADDR != 0,
+ * then the statistics are DMAed to that (page-aligned) location. Locks
+ * required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location).
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
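+
+/* Illustrative sketch only, not part of the MCDI definition: filling in
+ * DMA_LEN as described above, using the stats count reported by
+ * MC_CMD_GET_CAPABILITIES_V4_OUT when available and falling back to
+ * MC_CMD_MAC_NSTATS (defined below) for legacy firmware. The helper name is
+ * hypothetical.
+ */
+#if 0 /* example only */
+static void mac_stats_set_dma_len(u8 *req, u32 nstats_from_caps)
+{
+	u32 nstats = nstats_from_caps ? nstats_from_caps : MC_CMD_MAC_NSTATS;
+
+	put_unaligned_le32(nstats * sizeof(u64),
+			   req + MC_CMD_MAC_STATS_IN_DMA_LEN_OFST);
+}
+#endif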
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
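+
+/* Illustrative sketch only, not part of the MCDI definition: one way a
+ * driver might use GENERATION_START/GENERATION_END to take a consistent copy
+ * of a DMAed stats buffer is to read GENERATION_END, copy the statistics,
+ * re-read GENERATION_START and retry on mismatch, since the MC may rewrite
+ * the buffer concurrently. This assumes the legacy layout where entry 0x60
+ * holds GENERATION_END; real code also needs DMA read barriers and
+ * little-endian conversion of each entry, both omitted here. The helper name
+ * is hypothetical.
+ */
+#if 0 /* example only */
+static void mac_stats_snapshot(const u64 *dma_buf, u64 *dest)
+{
+	u64 gen_start, gen_end;
+
+	do {
+		gen_end = READ_ONCE(dma_buf[MC_CMD_MAC_GENERATION_END]);
+		memcpy(dest, dma_buf, MC_CMD_MAC_NSTATS * sizeof(u64));
+		gen_start = READ_ONCE(dma_buf[MC_CMD_MAC_GENERATION_START]);
+	} while (gen_start != gen_end);
+}
+#endif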
+
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3)
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum: Start of V4 stats buffer space */
+#define MC_CMD_MAC_V4_DMABUF_START 0x79
+/* enum: RXDP counter: Number of packets truncated because scattering was
+ * disabled.
+ */
+#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79
+/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting
+ * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a
+/* enum: RXDP counter: Number of times the RXDP timed out while head of line
+ * blocking. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b
+/* enum: This includes the space at offset 124 which is the final
+ * GENERATION_END in a MAC_STATS_V4 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V4 0x7d
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * requests record, which is ignored in all but the first structure)
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
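+
+/* Illustrative sketch only, not part of the MCDI definition: building a
+ * single-record MC_CMD_MEMCPY request whose source data is embedded in the
+ * payload rather than DMAed from the host, following the FROM_RID ==
+ * RID_INLINE convention described above. The inline data is appended after
+ * the 32-byte record and FROM_ADDR carries its offset from the start of the
+ * payload (so ADDR_HI ends up 0). The helper name is hypothetical and length
+ * checks are omitted.
+ */
+#if 0 /* example only */
+static size_t memcpy_build_inline(u8 *req, u32 to_rid, u64 to_addr,
+				  const void *data, u32 len)
+{
+	u32 data_ofst = MC_CMD_MEMCPY_IN_RECORD_LEN; /* just after record 0 */
+
+	put_unaligned_le32(1, req + MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST);
+	put_unaligned_le32(to_rid, req + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST);
+	put_unaligned_le64(to_addr, req + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST);
+	put_unaligned_le32(MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE,
+			   req + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST);
+	put_unaligned_le64(data_ofst, req + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST);
+	put_unaligned_le32(len, req + MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST);
+	memcpy(req + data_ofst, data, len);
+	return data_ofst + len; /* total request length */
+}
+#endif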
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
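+
+/* Illustrative sketch only, not part of the MCDI definition: TYPES is a bit
+ * mask indexed by the enum values above, so a caller might check for a
+ * particular partition type roughly as below (the helper name is
+ * hypothetical):
+ */
+#if 0 /* example only */
+static bool nvram_type_supported(u32 types, unsigned int type)
+{
+	return types & (1u << type); /* e.g. type == MC_CMD_NVRAM_TYPE_MC_FW */
+}
+#endif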
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3
+#define MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
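+
+/*
+ * The flag fields above follow the usual MCDI _OFST/_LBN/_WIDTH pattern: the
+ * flag occupies _WIDTH bits starting at bit _LBN of the dword at byte offset
+ * _OFST. A minimal sketch of testing the A_B flag, assuming "outbuf" holds a
+ * raw little-endian MC_CMD_NVRAM_INFO_OUT response:
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *
+ *   static bool nvram_info_is_a_b(const uint8_t *outbuf)
+ *   {
+ *           const uint8_t *p = outbuf + MC_CMD_NVRAM_INFO_OUT_A_B_OFST;
+ *           uint32_t flags = (uint32_t)p[0] | (uint32_t)p[1] << 8 |
+ *                            (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
+ *
+ *           return (flags >> MC_CMD_NVRAM_INFO_OUT_A_B_LBN) &
+ *                  ((1u << MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH) - 1);
+ *   }
+ */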
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held). In an adapter bound to a TSA controller,
+ * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types,
+ * i.e. static config, dynamic config and expansion ROM config. Attempting to
+ * perform this operation on a restricted partition will return the error
+ * EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired up with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock, in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
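+
+/*
+ * A minimal sketch of building an MC_CMD_NVRAM_READ_IN_V2 request that reads
+ * back the partition being updated (MODE=TARGET_BACKUP), as in the
+ * read-modify-write-verify sequence described above. The little-endian dword
+ * store helper and the transport are assumptions, not part of the MCDI
+ * definition:
+ *
+ *   #include <stddef.h>
+ *   #include <stdint.h>
+ *
+ *   static void put_dword(uint8_t *buf, size_t ofst, uint32_t v)
+ *   {
+ *           buf[ofst + 0] = v & 0xff;
+ *           buf[ofst + 1] = (v >> 8) & 0xff;
+ *           buf[ofst + 2] = (v >> 16) & 0xff;
+ *           buf[ofst + 3] = (v >> 24) & 0xff;
+ *   }
+ *
+ *   static void build_nvram_read_v2(uint8_t inbuf[MC_CMD_NVRAM_READ_IN_V2_LEN],
+ *                                   uint32_t type, uint32_t offset,
+ *                                   uint32_t length)
+ *   {
+ *           put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST, type);
+ *           put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST, offset);
+ *           put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST, length);
+ *           put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_MODE_OFST,
+ *                     MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);
+ *   }
+ */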
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1)
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1)
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
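+
+/*
+ * WRITE_BUFFER_MAXNUM above caps how much data fits into a single request
+ * (240 bytes for MCDI v1, 1008 for MCDI v2), so larger images are written as
+ * a sequence of chunked MC_CMD_NVRAM_WRITE requests between UPDATE_START and
+ * UPDATE_FINISH. A rough sketch of the chunking loop, in which
+ * mcdi_nvram_write_chunk() is a placeholder for building one request and
+ * issuing it over the transport:
+ *
+ *   #include <stddef.h>
+ *   #include <stdint.h>
+ *
+ *   static int nvram_write_blob(uint32_t type, uint32_t start,
+ *                               const uint8_t *data, size_t len)
+ *   {
+ *           size_t done = 0;
+ *
+ *           while (done < len) {
+ *                   size_t chunk = len - done;
+ *                   int rc;
+ *
+ *                   if (chunk > MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM)
+ *                           chunk = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ *                   rc = mcdi_nvram_write_chunk(type, start + done,
+ *                                               data + done, chunk);
+ *                   if (rc)
+ *                           return rc;
+ *                   done += chunk;
+ *           }
+ *           return 0;
+ *   }
+ */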
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/
+ * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to
+ * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of
+ * partition types, i.e. static config, dynamic config and expansion ROM
+ * config.
+ * Attempting to perform this operation on a restricted partition will return
+ * the error EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. Use paired up with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for Medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to exceed the
+ * MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, and the MCDI
+ * command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe
+/* enum: Internal-error. The bundle header is invalid. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16
+/* enum: Internal-error. Component hash calculation failed. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
+/* enum: The update operation is in-progress. */
+#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
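+
+/*
+ * The RUN_IN_BACKGROUND and POLL_VERIFY_RESULT flags, together with
+ * MC_CMD_NVRAM_VERIFY_RC_PENDING above, suggest a polled completion flow:
+ * start the (slow) verification in the background, then re-poll until the
+ * result is no longer pending. A rough sketch under that reading, where
+ * mcdi_update_finish_poll() and sleep_ms() are placeholders for re-issuing
+ * NVRAM_UPDATE_FINISH_V2 with POLL_VERIFY_RESULT set and for a delay:
+ *
+ *   #include <stdint.h>
+ *
+ *   static int wait_for_verify_result(uint32_t type)
+ *   {
+ *           uint32_t rc;
+ *
+ *           do {
+ *                   rc = mcdi_update_finish_poll(type);
+ *                   if (rc == MC_CMD_NVRAM_VERIFY_RC_PENDING)
+ *                           sleep_ms(250);
+ *           } while (rc == MC_CMD_NVRAM_VERIFY_RC_PENDING);
+ *
+ *           return rc == MC_CMD_NVRAM_VERIFY_RC_SUCCESS ? 0 : -1;
+ *   }
+ */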
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production MC firmware builds are generally compiled with
+ * REBOOT_ON_ASSERT=1, which means that they will automatically reboot out of
+ * the assertion handler, so in practice this is an optional operation. It is
+ * still recommended that drivers execute it to support custom firmware builds
+ * with REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice,maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for
+ * use as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
+
+/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4
+/* Flags controlling information retrieved */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Engineering sensor 1 */
+#define MC_CMD_SENSOR_ENGINEERING_1 0x57
+/* enum: Engineering sensor 2 */
+#define MC_CMD_SENSOR_ENGINEERING_2 0x58
+/* enum: Engineering sensor 3 */
+#define MC_CMD_SENSOR_ENGINEERING_3 0x59
+/* enum: Engineering sensor 4 */
+#define MC_CMD_SENSOR_ENGINEERING_4 0x5a
+/* enum: Engineering sensor 5 */
+#define MC_CMD_SENSOR_ENGINEERING_5 0x5b
+/* enum: Engineering sensor 6 */
+#define MC_CMD_SENSOR_ENGINEERING_6 0x5c
+/* enum: Engineering sensor 7 */
+#define MC_CMD_SENSOR_ENGINEERING_7 0x5d
+/* enum: Engineering sensor 8 */
+#define MC_CMD_SENSOR_ENGINEERING_8 0x5e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
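+
+/*
+ * A minimal sketch of applying the two (min, max) ranges above to a raw
+ * reading, following the rules in the MC_CMD_SENSOR_INFO description (equal
+ * limits mean a range is unused; outside the second range is fatal, outside
+ * the first but inside the second is a warning). The MC_CMD_SENSOR_STATE_*
+ * values are defined with MC_CMD_READ_SENSORS below; treating the limits as
+ * unsigned is an assumption, since their interpretation is sensor-specific:
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *
+ *   static int classify_reading(uint16_t value, uint16_t min1, uint16_t max1,
+ *                               uint16_t min2, uint16_t max2)
+ *   {
+ *           bool in1 = value >= min1 && value <= max1;
+ *           bool have2 = min2 != max2;
+ *           bool in2 = value >= min2 && value <= max2;
+ *
+ *           if (in1)
+ *                   return MC_CMD_SENSOR_STATE_OK;
+ *           if (have2 && in2)
+ *                   return MC_CMD_SENSOR_STATE_WARNING;
+ *           return MC_CMD_SENSOR_STATE_FATAL;
+ *   }
+ */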
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then return
+ * EINVAL.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
+/* Flags controlling information retrieved */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1
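+
+/*
+ * A minimal sketch of filling in MC_CMD_READ_SENSORS_EXT_IN: the 64-bit DMA
+ * address of the 4Kbyte-aligned host buffer is split into little-endian LO
+ * and HI dwords, and LENGTH gives the buffer size in bytes. put_dword() is
+ * the same assumed little-endian store helper as in the NVRAM_READ sketch
+ * above; obtaining the DMA address is outside the scope of this file:
+ *
+ *   static void build_read_sensors(uint8_t inbuf[MC_CMD_READ_SENSORS_EXT_IN_LEN],
+ *                                  uint64_t dma_addr, uint32_t buf_len)
+ *   {
+ *           put_dword(inbuf, MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST,
+ *                     (uint32_t)dma_addr);
+ *           put_dword(inbuf, MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST,
+ *                     (uint32_t)(dma_addr >> 32));
+ *           put_dword(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST, buf_len);
+ *   }
+ */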
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
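+
+/*
+ * A minimal sketch of unpacking one DMA'd sensor reading using the typedef
+ * above, where "entry" points at a single little-endian 4-byte
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF within the host buffer:
+ *
+ *   #include <stdint.h>
+ *
+ *   struct sensor_reading { uint16_t value; uint8_t state; uint8_t type; };
+ *
+ *   static struct sensor_reading parse_sensor_entry(const uint8_t *entry)
+ *   {
+ *           struct sensor_reading r = {
+ *                   // VALUE is a 16-bit little-endian field at offset 0
+ *                   .value = (uint16_t)(entry[0] | entry[1] << 8),
+ *                   .state = entry[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST],
+ *                   .type  = entry[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST],
+ *           };
+ *
+ *           return r;
+ *   }
+ */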
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. There are 8 Tx queues that map to priorities 0 - 7. Use
+ * all 1s to disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround; that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1)
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016
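+
+/*
+ * Per the description above, PAGE=0 and PAGE=1 each return a 128-byte block
+ * read from SFP+ module I2C address 0xA0 at offset 0 or 0x80, so issuing the
+ * command twice and concatenating the blocks recovers the module's 256-byte
+ * ID space. A rough sketch, where mcdi_get_phy_media_page() is a placeholder
+ * for building the request, issuing it and copying DATA out:
+ *
+ *   #include <stdint.h>
+ *
+ *   static int read_sfp_id_block(uint8_t id[256])
+ *   {
+ *           for (unsigned int page = 0; page < 2; page++) {
+ *                   int rc = mcdi_get_phy_media_page(page, id + page * 128,
+ *                                                    128);
+ *                   if (rc)
+ *                           return rc;
+ *           }
+ *           return 0;
+ *   }
+ */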
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
+/* 0-8 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4)
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254
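+
+/*
+ * A minimal sketch of walking the TYPE_ID array in the response above,
+ * assuming "outbuf"/"outlen" hold a raw little-endian
+ * MC_CMD_NVRAM_PARTITIONS_OUT response (NUM_PARTITIONS at offset 0 should
+ * match the count derived from the response length):
+ *
+ *   #include <stddef.h>
+ *   #include <stdint.h>
+ *
+ *   static uint32_t get_dword(const uint8_t *buf, size_t ofst)
+ *   {
+ *           return (uint32_t)buf[ofst] | (uint32_t)buf[ofst + 1] << 8 |
+ *                  (uint32_t)buf[ofst + 2] << 16 |
+ *                  (uint32_t)buf[ofst + 3] << 24;
+ *   }
+ *
+ *   static void list_partitions(const uint8_t *outbuf, size_t outlen)
+ *   {
+ *           size_t n = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(outlen);
+ *
+ *           for (size_t i = 0; i < n; i++) {
+ *                   uint32_t type = get_dword(outbuf,
+ *                           MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST +
+ *                           i * MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN);
+ *                   // type is one of the partition type ID codes
+ *                   (void)type;
+ *           }
+ *   }
+ */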
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1)
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000
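+
+/*
+ * Illustrative sketch, not part of the generated definitions: decoding one of
+ * the MC_CMD_NVRAM_METADATA flag bits. Flag bits are located by the
+ * _LBN/_WIDTH pairs within the FLAGS dword at its _OFST. Uses the
+ * hypothetical mcdi_ex_get_dword() helper sketched earlier.
+ */
+static inline int mcdi_ex_nvram_metadata_version_valid(const uint8_t *outbuf,
+                                                       size_t outlen)
+{
+        uint32_t flags;
+
+        if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
+                return 0;
+        flags = mcdi_ex_get_dword(outbuf, MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST);
+        return (flags >> MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN) &
+               ((1u << MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH) - 1);
+}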
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
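+
+/*
+ * Illustrative sketch, not part of the generated definitions: deriving the
+ * Nth MAC address from an MC_CMD_GET_MAC_ADDRESSES response (n should be
+ * below MAC_COUNT). The stride interpretation here - treat the 6-byte base as
+ * a 48-bit big-endian integer and add n * stride - is one plausible reading
+ * of "spacing", not something this header specifies. Uses the hypothetical
+ * mcdi_ex_get_dword() helper sketched earlier.
+ */
+static inline void mcdi_ex_nth_mac(const uint8_t *outbuf, uint32_t n,
+                                   uint8_t mac[6])
+{
+        const uint8_t *base = outbuf +
+                MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST;
+        uint32_t stride = mcdi_ex_get_dword(outbuf,
+                MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST);
+        uint64_t v = 0;
+        int i;
+
+        for (i = 0; i < 6; i++)
+                v = (v << 8) | base[i];
+        v += (uint64_t)n * stride;
+        for (i = 5; i >= 0; i--) {
+                mac[i] = (uint8_t)(v & 0xff);
+                v >>= 8;
+        }
+}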
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation, see SF-110495-PS for details of CLP
+ * processing. This command has been extended to accommodate the requirements of
+ * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
+ * SF-120509-TC and SF-117282-PS.
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
+#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
+#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
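+
+/*
+ * Illustrative sketch, not part of the generated definitions: building the
+ * SET_MAC sub-operation of MC_CMD_CLP. The OP dword selects the
+ * sub-operation; an all-zero MAC restores the factory-programmed address, as
+ * described above. Uses the hypothetical mcdi_ex_put_dword() helper sketched
+ * earlier and memcpy()/memset() from <string.h>.
+ */
+static inline void mcdi_ex_build_clp_set_mac(uint8_t *inbuf /* >= 12 bytes */,
+                                             const uint8_t mac[6])
+{
+        memset(inbuf, 0, MC_CMD_CLP_IN_SET_MAC_LEN);
+        mcdi_ex_put_dword(inbuf, MC_CMD_CLP_IN_OP_OFST, MC_CMD_CLP_OP_SET_MAC);
+        memcpy(inbuf + MC_CMD_CLP_IN_SET_MAC_ADDR_OFST, mac,
+               MC_CMD_CLP_IN_SET_MAC_ADDR_LEN);
+        /* The RESERVED padding at offset 10 stays zero from the memset. */
+}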
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
+#define MC_CMD_MUM_IN_OP_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
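+
+/*
+ * Illustrative sketch, not part of the generated definitions: packing an
+ * MC_CMD_MUM GPIO_OP OUT_WRITE request. The outer MUM op goes in the command
+ * dword at offset 0; the HDR dword at offset 4 then stacks the GPIO opcode,
+ * the bitwise sub-opcode, the GPIO number and the bit value at the _LBN
+ * positions defined above. Uses the hypothetical mcdi_ex_put_dword() helper
+ * sketched earlier.
+ */
+static inline void mcdi_ex_build_mum_gpio_out_write(uint8_t *inbuf /* >= 8 bytes */,
+                                                    uint32_t gpio_number,
+                                                    uint32_t bit_value)
+{
+        uint32_t hdr = 0;
+
+        hdr |= MC_CMD_MUM_IN_GPIO_OP << MC_CMD_MUM_IN_GPIO_OPCODE_LBN;
+        hdr |= MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE <<
+               MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN;
+        hdr |= (gpio_number & 0xff) << MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN;
+        hdr |= (bit_value & 0x1) << MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN;
+
+        mcdi_ex_put_dword(inbuf, MC_CMD_MUM_IN_CMD_OFST, MC_CMD_MUM_OP_GPIO);
+        mcdi_ex_put_dword(inbuf, MC_CMD_MUM_IN_GPIO_OP_HDR_OFST, hdr);
+}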
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
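+
+/*
+ * Illustrative sketch, not part of the generated definitions: unpacking one
+ * MC_CMD_MUM READ_SENSORS data dword into its READING, STATE and TYPE fields
+ * using the _LBN/_WIDTH pairs above. Uses the hypothetical
+ * mcdi_ex_get_dword() helper sketched earlier.
+ */
+static inline void mcdi_ex_mum_decode_sensor(const uint8_t *outbuf,
+                                             unsigned int idx,
+                                             uint16_t *reading,
+                                             uint8_t *state, uint8_t *type)
+{
+        uint32_t d = mcdi_ex_get_dword(outbuf,
+                        MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST + 4 * idx);
+
+        *reading = (uint16_t)((d >> MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN) &
+                              0xffff);
+        *state = (uint8_t)((d >> MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN) & 0xff);
+        *type = (uint8_t)((d >> MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN) & 0xff);
+}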
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: 1.8V. Values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present, supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but contact cannot be established
+ * over I2C
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
+/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
+ * should match the equivalent structure in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24
+/* A value below this will trigger a warning event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32
+/* A value below this will trigger a critical event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32
+/* A value below this will shut down the card. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32
+/* A value above this will trigger a warning event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32
+/* A value above this will trigger a critical event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32
+/* A value above this will shut down the card. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32
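+
+/*
+ * Illustrative sketch, not part of the generated definitions: a C struct that
+ * mirrors the 24-byte MC_CMD_DYNAMIC_SENSORS_LIMITS layout above (six
+ * little-endian 32-bit words). On a little-endian host with no padding it can
+ * be filled with a straight copy; otherwise decode per field. Whether a given
+ * limit is meaningful, and whether its value is signed, depends on the sensor
+ * type, so the raw 32-bit representation is kept here.
+ */
+struct mcdi_ex_dynamic_sensor_limits {
+        uint32_t lo_warning;    /* below this: warning event */
+        uint32_t lo_critical;   /* below this: critical event */
+        uint32_t lo_fatal;      /* below this: card shuts down */
+        uint32_t hi_warning;    /* above this: warning event */
+        uint32_t hi_critical;   /* above this: critical event */
+        uint32_t hi_fatal;      /* above this: card shuts down */
+};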
+
+/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64
+/* The handle used to identify the sensor in calls to
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32
+/* A human-readable name for the sensor (zero-terminated string, max 32 bytes)
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256
+/* The type of the sensor device, and by implication the unit in which its
+ * values will be reported
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4
+/* enum: A voltage sensor. Unit is mV */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0
+/* enum: A current sensor. Unit is mA */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1
+/* enum: A power sensor. Unit is mW */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2
+/* enum: A temperature sensor. Unit is Celsius */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3
+/* enum: A cooling fan sensor. Unit is RPM */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32
+/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192
+
+/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_READING_LEN 12
+/* The handle used to identify the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32
+/* The current value of the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32
+/* The sensor's condition, e.g. good, broken or removed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4
+/* enum: Sensor working normally within limits */
+#define MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0
+/* enum: Warning threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1
+/* enum: Critical threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2
+/* enum: Fatal threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3
+/* enum: Sensor not working */
+#define MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4
+/* enum: Sensor working but no reading available */
+#define MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5
+/* enum: Sensor initialization failed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_LIST
+ * Return a complete list of handles for sensors currently managed by the MC,
+ * and a generation count for this version of the sensor table. On systems
+ * advertising the DYNAMIC_SENSORS capability bit, this replaces the
+ * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
+ * added by the NMC.
+ *
+ * Sensor handles are persistent for the lifetime of the sensor and are used to
+ * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
+ *
+ * The generation count is maintained by the MC, is persistent across reboots
+ * and will be incremented each time the sensor table is modified. When the
+ * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
+ * containing the new generation count. The driver should compare this against
+ * the current generation count, and if it is different, call
+ * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table.
+ *
+ * The sensor count is provided to allow a future path to supporting more than
+ * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
+ * the maximum number that will fit in a single response. As this is a fairly
+ * large number (253), it is not anticipated that this will be needed in the
+ * near future, so it can currently be ignored.
+ *
+ * On Riverhead this command is implemented as a wrapper for `list` in the
+ * sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
+#undef MC_CMD_0x66_PRIVILEGE_CTG
+
+#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4)
+/* Generation count, which will be updated each time a sensor is added to or
+ * removed from the MC sensor table.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4
+/* Number of sensors managed by the MC. Note that in principle, this can be
+ * larger than the size of the HANDLES array.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253
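+
+/*
+ * Illustrative sketch, not part of the generated definitions: parsing an
+ * MC_CMD_DYNAMIC_SENSORS_LIST response. A driver would typically cache
+ * GENERATION and the handle array, and re-issue the command whenever a
+ * CODE_DYNAMIC_SENSORS_CHANGE event reports a different generation count.
+ * The number of handles actually returned comes from the response length via
+ * the _HANDLES_NUM() macro. Uses the hypothetical mcdi_ex_get_dword() helper
+ * sketched earlier.
+ */
+static inline size_t mcdi_ex_parse_sensor_list(const uint8_t *outbuf,
+                                               size_t outlen,
+                                               uint32_t *generation,
+                                               uint32_t *handles,
+                                               size_t max_handles)
+{
+        size_t i, n;
+
+        if (outlen < MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN)
+                return 0;
+        *generation = mcdi_ex_get_dword(outbuf,
+                        MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST);
+        n = MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(outlen);
+        if (n > max_handles)
+                n = max_handles;
+        for (i = 0; i < n; i++)
+                handles[i] = mcdi_ex_get_dword(outbuf,
+                                MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST + 4 * i);
+        return n;
+}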
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
+ * Get descriptions for a set of sensors, specified as an array of sensor
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for
+ * `get_descriptions` in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
+#undef MC_CMD_0x67_PRIVILEGE_CTG
+
+#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64)
+/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
+ * Read the state and value for a set of sensors, specified as an array of
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
+ *
+ * In the case of a broken sensor, then the state of the response's
+ * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
+ * provided should be treated as erroneous.
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
+ * in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
+#undef MC_CMD_0x68_PRIVILEGE_CTG
+
+#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12)
+/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
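+
+/*
+ * Illustrative sketch, not part of the generated definitions: decoding an
+ * MC_CMD_DYNAMIC_SENSORS_GET_READINGS response. Because handles unknown to
+ * the MC are silently dropped, the response may contain fewer entries than
+ * were requested, so each entry's HANDLE field identifies the sensor rather
+ * than its position. Uses the hypothetical mcdi_ex_get_dword() helper
+ * sketched earlier.
+ */
+static inline void mcdi_ex_walk_sensor_readings(const uint8_t *outbuf,
+                                                size_t outlen)
+{
+        size_t i, n = MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(outlen);
+
+        for (i = 0; i < n; i++) {
+                size_t base = MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST +
+                              i * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN;
+                uint32_t handle = mcdi_ex_get_dword(outbuf,
+                                base + MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST);
+                uint32_t value = mcdi_ex_get_dword(outbuf,
+                                base + MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST);
+                uint32_t state = mcdi_ex_get_dword(outbuf,
+                                base + MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST);
+
+                /* A state of MC_CMD_DYNAMIC_SENSORS_READING_BROKEN means the
+                 * VALUE should be ignored.
+                 */
+                (void)handle; (void)value; (void)state;
+        }
+}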
+
+
+/***********************************/
+/* MC_CMD_EVENT_CTRL
+ * Configure which categories of unsolicited events the driver expects to
+ * receive (Riverhead).
+ */
+#define MC_CMD_EVENT_CTRL 0x69
+#undef MC_CMD_0x69_PRIVILEGE_CTG
+
+#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVENT_CTRL_IN msgrequest */
+#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
+#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
+#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
+#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
+/* Array of event categories for which the driver wishes to receive events. */
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
+/* enum: Driver wishes to receive LINKCHANGE events. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
+/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
+ */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
+/* enum: Driver wishes to receive receive (RX) errors. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
+/* enum: Driver wishes to receive transmit errors. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
+/* enum: Driver wishes to receive firmware alerts. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
+/* enum: Driver wishes to receive reboot events. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
+
+/* MC_CMD_EVENT_CTRL_OUT msgresponse */
+#define MC_CMD_EVENT_CTRL_OUT_LEN 0
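+
+/*
+ * Illustrative sketch, not part of the generated definitions: building an
+ * MC_CMD_EVENT_CTRL request that subscribes to link-change and MC-reboot
+ * events only. The request is simply an array of category dwords; its total
+ * length tells the MC how many categories follow. Uses the hypothetical
+ * mcdi_ex_put_dword() helper sketched earlier.
+ */
+static inline size_t mcdi_ex_build_event_ctrl(uint8_t *inbuf /* >= 8 bytes */)
+{
+        mcdi_ex_put_dword(inbuf, MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST + 0,
+                          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE);
+        mcdi_ex_put_dword(inbuf, MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST + 4,
+                          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT);
+        return MC_CMD_EVENT_CTRL_IN_LEN(2);     /* request length in bytes */
+}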
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
+/* enum: Used for XIP code of shmbooted images */
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
+/* enum: Bundle image partition */
+#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00
+/* enum: Bundle metadata partition that holds additional information related to
+ * a bundle update in TLV format
+ */
+#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01
+/* enum: Bundle update non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
+/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
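+
+/* Editorial sketch, not part of the generated MCDI definitions: the PHY
+ * partition range above encodes the PHY ID in the low 8 bits, so a per-PHY
+ * partition type could be derived as below. The helper name is hypothetical.
+ */
+static inline unsigned int nvram_phy_partition_type(unsigned int phy_id)
+{
+        return NVRAM_PARTITION_TYPE_PHY_MIN + (phy_id & 0xff);
+}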
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_OFST 0
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_OFST 0
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_OFST 0
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_OFST 0
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
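+
+/* Editorial sketch, not part of the generated definitions: each _LBN/_WIDTH
+ * pair above locates a field within the 64-bit feature mask (WIDTH bits
+ * starting at bit LBN). A single-bit feature flag could be tested as below;
+ * the helper name and the use of the kernel's u64/bool types are assumptions.
+ */
+static inline bool licensed_feature_present(u64 mask, unsigned int lbn)
+{
+        return (mask >> lbn) & 1;
+}
+/* e.g. licensed_feature_present(mask, LICENSED_FEATURES_PIO_LBN) */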
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_OFST 0
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_OFST 0
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
+#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
+#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_OFST 0
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_OFST 0
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_OFST 0
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_OFST 0
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_OFST 0
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_OFST 0
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a
+ * TX timestamp.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
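+
+/* Editorial sketch, not part of the generated definitions: once both the low
+ * and high halves of a TX timestamp (TX_EV_TSTAMP_LO/HI above) have been
+ * received, their 16-bit TSTAMP_DATA fields could be combined as below. The
+ * helper name and the u32/u16 types are assumptions.
+ */
+static inline u32 tx_timestamp_from_halves(u16 tstamp_lo, u16 tstamp_hi)
+{
+        return ((u32)tstamp_hi << TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH) |
+               tstamp_lo;
+}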
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15, which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_OFST 0
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_OFST 0
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_OFST 0
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_OFST 0
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
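+
+/* Editorial sketch, not part of the generated definitions: an RSS mode value
+ * is simply the OR of the single-bit hash selectors above. For example, a
+ * 4-tuple hash over addresses and ports could be expressed as below; the
+ * macro name is hypothetical.
+ */
+#define RSS_MODE_HASH_4TUPLE_EXAMPLE                            \
+        ((1 << RSS_MODE_HASH_SRC_ADDR_LBN) |                    \
+         (1 << RSS_MODE_HASH_DST_ADDR_LBN) |                    \
+         (1 << RSS_MODE_HASH_SRC_PORT_LBN) |                    \
+         (1 << RSS_MODE_HASH_DST_PORT_LBN))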
+
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
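+
+/* Editorial sketch, not part of the generated definitions: the response above
+ * is a 16-byte validity mask followed by 73 32-bit register values. Assuming
+ * the usual little-endian MCDI encoding, one register could be extracted from
+ * a raw response buffer as below; the function name and the plain byte-buffer
+ * interface are assumptions rather than the driver's real MCDI accessors.
+ */
+static inline u32 read_regs_out_reg(const u8 *outbuf, unsigned int idx)
+{
+        const u8 *p = outbuf + MC_CMD_READ_REGS_OUT_REGS_OFST +
+                      idx * MC_CMD_READ_REGS_OUT_REGS_LEN;
+
+        return p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
+}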
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64
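+
+/* Editorial sketch, not part of the generated definitions: the request is
+ * variable length, carrying one 8-byte DMA address per 4 KiB of host memory
+ * backing the EVQ, so its length is MC_CMD_INIT_EVQ_IN_LEN(n) = 36 + 8 * n
+ * bytes for n buffers (44 bytes minimum, 548 bytes for the 64-buffer
+ * maximum). A helper along these lines could size the request; the name and
+ * the use of DIV_ROUND_UP()/size_t are assumptions.
+ */
+static inline size_t init_evq_in_len_for(size_t evq_bytes)
+{
+        size_t n_bufs = DIV_ROUND_UP(evq_bytes, 4096);
+
+        return MC_CMD_INIT_EVQ_IN_LEN(n_bufs); /* 36 + 8 * n_bufs */
+}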
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
+
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be overridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet (FCoE). */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy INIT_RXQ request. Use the extended
+ * version in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
+
+/* MC_CMD_INIT_RXQ_V3_IN msgrequest */
+#define MC_CMD_INIT_RXQ_V3_IN_LEN 560
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
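+
+/* Editorial sketch, not part of the generated definitions: in
+ * EQUAL_STRIDE_SUPER_BUFFER mode each bucket holds
+ * ES_PACKET_BUFFERS_PER_BUCKET buffers spaced ES_PACKET_STRIDE bytes apart,
+ * and the adapter writes at most ES_MAX_DMA_LEN bytes into each one, so a
+ * minimal sanity check is shown below. The helper name is hypothetical and
+ * the authoritative constraints are in SF-119419-TC.
+ */
+static inline bool es_geometry_plausible(u32 es_max_dma_len,
+                                         u32 es_packet_stride)
+{
+        /* The writable area must fit within one stride-spaced buffer. */
+        return es_max_dma_len <= es_packet_stride;
+}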
+
+/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with a new field required
+ * for systems with a QDMA (currently, Riverhead)
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_LEN 564
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560
+#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. 2KB and 4KB buffers are guaranteed to be available, but a
+ * request for a different buffer size will fail if there are already eight
+ * other buffer sizes in use. In future Riverhead this limit will go away and
+ * any size will be accepted.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560
+#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4
+
+/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with the ability to request a
+ * different RX packet prefix
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_LEN 568
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. 2KB and 4KB buffers are guaranteed to be available, but a
+ * request for a different buffer size will fail if there are already eight
+ * other buffer sizes in use. In future Riverhead this limit will go away and
+ * any size will be accepted.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4
+/* Prefix id for the RX prefix format to use on packets delivered to this queue.
+ * Zero is always a valid prefix id and means the default prefix format
+ * documented for the platform. Other prefix ids can be obtained by calling
+ * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564
+#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V4_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V5_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use the extended
+ * version in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4k-aligned, 4k host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
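The request is variable-length: the fixed 28-byte header is followed by up to DMA_ADDR_MAXNUM 64-bit addresses of the 4k chunks of queue memory, and the _LEN(num)/_NUM(len) macros convert between entry count and message length. A sketch of filling the address array, reusing the hypothetical mcdi_put_dword helper from the INIT_RXQ example above (the caller is assumed to have filled SIZE, TARGET_EVQ, LABEL, INSTANCE, FLAGS, OWNER_ID and PORT_ID separately):

/* returns the total request length to pass to the MCDI transport */
static size_t init_txq_fill_dma_addrs(uint8_t *inbuf, const uint64_t *addr,
                                      unsigned int n)
{
        unsigned int i;

        /* one 8-byte entry per 4k chunk; n must not exceed DMA_ADDR_MAXNUM */
        for (i = 0; i < n; i++) {
                unsigned int ofst = MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST +
                                    i * MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN;

                mcdi_put_dword(inbuf, ofst, (uint32_t)addr[i]);
                mcdi_put_dword(inbuf, ofst + 4, (uint32_t)(addr[i] >> 32));
        }
        /* 28 + 8*n bytes, per MC_CMD_INIT_TXQ_IN_LEN(num) */
        return MC_CMD_INIT_TXQ_IN_LEN(n);
}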
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4k-aligned, 4k host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
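The FLAGS word is a packed bitfield described by the _LBN/_WIDTH pairs above; the in-tree sfc driver normally composes it with its MCDI_POPULATE_DWORD_* helpers, and the arithmetic those helpers hide looks roughly like this (illustrative only; the particular flags chosen here are arbitrary):

static uint32_t init_txq_ext_flags(unsigned int crc_mode)
{
        uint32_t flags = 0;

        /* single-bit flags live at their _LBN position */
        flags |= 1u << MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN;
        flags |= 1u << MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN;
        flags |= 1u << MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN;
        /* CRC_MODE is a 4-bit field: mask to _WIDTH bits, shift to _LBN */
        flags |= (crc_mode & ((1u << MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH) - 1))
                 << MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN;

        return flags;   /* stored as the dword at MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST */
}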
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Tear down an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY.
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Tear down an RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Tear down a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
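FINI_EVQ, FINI_RXQ and FINI_TXQ above all share the same four-byte shape: a single INSTANCE dword naming the queue to destroy, with the ordering constraint that queues referencing an EVQ must be torn down before that EVQ. A sketch for FINI_RXQ, reusing the hypothetical mcdi_put_dword helper from earlier:

static size_t build_fini_rxq(uint8_t *inbuf, uint32_t rxq_instance)
{
        /* same function-local queue index previously passed to INIT_RXQ */
        mcdi_put_dword(inbuf, MC_CMD_FINI_RXQ_IN_INSTANCE_OFST, rxq_instance);
        return MC_CMD_FINI_RXQ_IN_LEN;  /* 4 bytes */
}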
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
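The 64-bit event payload is carried as two little-endian dwords at DATA_LO/DATA_HI. A sketch using the same hypothetical helper as above:

static size_t build_driver_event(uint8_t *inbuf, uint32_t evq, uint64_t ev_data)
{
        mcdi_put_dword(inbuf, MC_CMD_DRIVER_EVENT_IN_EVQ_OFST, evq);
        /* split bits 0-63 of the event across the LO/HI dwords */
        mcdi_put_dword(inbuf, MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST,
                       (uint32_t)ev_data);
        mcdi_put_dword(inbuf, MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST,
                       (uint32_t)(ev_data >> 32));
        return MC_CMD_DRIVER_EVENT_IN_LEN;      /* 12 bytes */
}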
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
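HANDLE and FIRSTID here come from the preceding MC_CMD_ALLOC_BUFTBL_CHUNK response (its HANDLE and ID fields), and the ENTRY array carries one 64-bit DMA address per buffer table entry, up to ENTRY_MAXNUM per call. A sketch, again reusing the hypothetical mcdi_put_dword helper:

static size_t build_program_buftbl(uint8_t *inbuf, uint32_t handle,
                                   uint32_t first_id, const uint64_t *dma_addr,
                                   unsigned int n)
{
        unsigned int i;

        /* n must not exceed MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM */
        mcdi_put_dword(inbuf, MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST, handle);
        mcdi_put_dword(inbuf, MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST, first_id);
        mcdi_put_dword(inbuf, MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST, n);
        for (i = 0; i < n; i++) {
                unsigned int ofst = MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST +
                                    i * MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN;

                mcdi_put_dword(inbuf, ofst, (uint32_t)dma_addr[i]);
                mcdi_put_dword(inbuf, ofst + 4, (uint32_t)(dma_addr[i] >> 32));
        }
        /* 12 + 8*n bytes, per MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) */
        return MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(n);
}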
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
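Putting the pieces together, here is a hedged sketch of an INSERT request that matches on destination MAC plus outer VLAN and delivers matching packets to a single host RX queue. All other match fields are left zero, the VLAN tag is assumed to be supplied already in network byte order, and mcdi_put_dword is the hypothetical helper from the earlier examples:

#include <string.h>

static size_t build_filter_insert(uint8_t *inbuf, const uint8_t dst_mac[6],
                                  uint16_t outer_vlan_be, uint32_t port_id,
                                  uint32_t rxq)
{
        memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_IN_OP_OFST,
                       MC_CMD_FILTER_OP_IN_OP_INSERT);
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_IN_PORT_ID_OFST, port_id);
        /* match criteria: one bit per field, positions given by the _LBN macros */
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST,
                       (1u << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN) |
                       (1u << MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN));
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_IN_RX_DEST_OFST,
                       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST, rxq);
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_IN_RX_MODE_OFST,
                       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_IN_TX_DEST_OFST,
                       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
        /* match values are raw bytes in network order */
        memcpy(inbuf + MC_CMD_FILTER_OP_IN_DST_MAC_OFST, dst_mac,
               MC_CMD_FILTER_OP_IN_DST_MAC_LEN);
        memcpy(inbuf + MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST, &outer_vlan_be,
               MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN);
        return MC_CMD_FILTER_OP_IN_LEN;
}
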
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
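The VNI_OR_VSID dword packs a 24-bit value and an 8-bit type selector, as described by the VNI_VALUE/VNI_TYPE (or VSID_VALUE/VSID_TYPE) sub-field macros above. A small sketch of composing it strictly from those macros:

static uint32_t filter_vni_or_vsid(uint32_t value, uint32_t type)
{
        /* 24-bit VNI/VSID value in the low bits, 8-bit type selector above it */
        return (value & ((1u << MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH) - 1)) |
               (type << MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN);
}

/* e.g. to match Geneve traffic with VNI 42, store
 *   filter_vni_or_vsid(42, MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE)
 * at MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST and set the MATCH_VNI_OR_VSID
 * bit in MATCH_FIELDS.
 */
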
+/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
+ * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
+ * its rte_flow API. This extension is only useful with the sfc_efx driver
+ * included as part of DPDK, used in conjunction with the dpdk datapath
+ * firmware variant.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_LEN 180
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
+/* Set an action for all packets matching this filter. The DPDK driver and dpdk
+ * f/w variant use their own specific delivery structures, which are documented
+ * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
+ * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
+ * will cause the filter insertion to fail with ENOTSUP.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
+/* enum: do nothing extra */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0
+/* enum: Set the match flag in the packet prefix for packets matching the
+ * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "FLAG" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1
+/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching
+ * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "MARK" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
+
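A sketch of requesting the rte_flow-style MARK action on a V3 insert; as noted above this only succeeds on the dpdk firmware variant, and the mark value must stay within the advertised maximum (mcdi_put_dword is the hypothetical helper from earlier):

static void filter_v3_set_mark(uint8_t *inbuf, uint32_t mark)
{
        /* mark must not exceed GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX */
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST,
                       MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST, mark);
}
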
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
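Since the returned handle is opaque apart from the guaranteed-invalid all-ones value, callers can only store it and test it against that sentinel. A sketch (mcdi_get_dword is a hypothetical little-endian reader mirroring the earlier writer):

/* hypothetical helper: load a little-endian 32-bit value from byte offset ofst */
static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

static int filter_handle_is_valid(const uint8_t *outbuf)
{
        uint32_t lo = mcdi_get_dword(outbuf, MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST);
        uint32_t hi = mcdi_get_dword(outbuf, MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST);

        /* 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle */
        return !(lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
                 hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);
}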
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+/* enum: read the list of supported matches for the encapsulation detection
+ * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
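The SUPPORTED_MATCHES array is variable-length, so its element count can be recovered either from NUM_SUPPORTED_MATCHES or from the response length via the _NUM(len) macro. A sketch using the latter (mcdi_get_dword as defined in the FILTER_OP example):

static unsigned int parser_disp_supported_matches(const uint8_t *outbuf,
                                                  size_t outlen,
                                                  uint32_t *matches,
                                                  unsigned int max)
{
        /* entry count implied by the response length: (len - 8) / 4 */
        unsigned int n = MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(outlen);
        unsigned int i;

        for (i = 0; i < n && i < max; i++)
                matches[i] = mcdi_get_dword(outbuf,
                        MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST +
                        i * MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
        return i;       /* entries copied, highest priority first */
}
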
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
+ * returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
+ * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
+ * supported match types that can be used in the encapsulation detection rules
+ * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD.
+ */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FLAGS values for
+ * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+
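As a usage illustration, the sketch below shows how a zero-payload request with a fixed-length response, such as this command, is typically issued from the sfc driver side. It assumes the driver's existing MCDI helpers (MCDI_DECLARE_BUF, MCDI_DWORD, efx_mcdi_rpc) and an initialised struct efx_nic; the function name is hypothetical and error handling is reduced to the essentials.

#include "mcdi.h"        /* sfc MCDI helpers, assumed available to the caller */

static int example_get_port_assignment(struct efx_nic *efx, u32 *port)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
        size_t outlen;
        int rc;

        /* IN_LEN is 0, so the request carries no payload */
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)
                return -EIO;

        *port = MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
        return 0;
}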
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
+
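The response exists in a Huntington-compatible and an extended form, distinguished only by length. Below is a minimal sketch of driving this command, again assuming the sfc MCDI helpers and using a hypothetical function name; VI_SHIFT is read only when the firmware actually returned the extended response.

static int example_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
                             unsigned int max_vis, unsigned int *vi_base,
                             unsigned int *vi_count, unsigned int *vi_shift)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
        rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
                return -EIO;

        *vi_count = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
        *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
        /* VI_SHIFT exists only in the extended response; default it otherwise */
        *vi_shift = outlen >= MC_CMD_ALLOC_VIS_EXT_OUT_LEN ?
                    MCDI_DWORD(outbuf, ALLOC_VIS_EXT_OUT_VI_SHIFT) : 0;
        return 0;
}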
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
+/* Max number of VFs before SRIOV stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
+/* Max number of VFs before SRIOV stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
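For the request side, the sketch below builds the 20-byte SET_SRIOV_CFG payload, composing the FLAGS dword by hand from the VF_ENABLED LBN so the bitfield convention stays visible. It assumes the sfc MCDI helpers; the function name and the choice of passing 0 ("no change") for the RID offset and stride are illustrative.

static int example_set_sriov_cfg(struct efx_nic *efx, bool vf_enabled,
                                 u32 vf_count)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_SRIOV_CFG_IN_LEN);
        u32 flags = 0;

        if (vf_enabled)
                flags |= 1u << MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN;

        MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_CURRENT, vf_count);
        MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_MAX, vf_count);
        MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_FLAGS, flags);
        /* 0 means "no change" for both the RID offset and the stride */
        MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_OFFSET, 0);
        MCDI_SET_DWORD(inbuf, SET_SRIOV_CFG_IN_VF_STRIDE, 0);

        /* The response is empty (OUT_LEN 0), so no output buffer is needed */
        return efx_mcdi_rpc(efx, MC_CMD_SET_SRIOV_CFG, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}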
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated
+ * to this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
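Several of the fields above are 64-bit quantities split into LO/HI dwords, with the sub-field LBN values counted from bit 0 of the full 64-bit value. The short parsing sketch below works under that reading, on raw little-endian response bytes and with an illustrative function name, and recovers WAITCOUNT at LBN 32:

#include <linux/types.h>
#include <asm/unaligned.h>

static u8 example_tx_meta_waitcount(const u8 *outbuf)
{
        /* Reassemble the 64-bit TX_META field from its LO and HI dwords */
        u64 meta = get_unaligned_le32(outbuf +
                        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST) |
                   (u64)get_unaligned_le32(outbuf +
                        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST) << 32;

        /* WAITCOUNT: LBN 32, WIDTH 8 within the 64-bit TX_META value */
        return (meta >> MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN) &
               ((1ULL << MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH) - 1);
}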
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
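These two commands naturally pair up. A minimal sketch assuming the sfc MCDI helpers follows, with hypothetical function names; note that FREE_PIOBUF has no response payload, so no output buffer is passed.

static int example_alloc_piobuf(struct efx_nic *efx, u32 *handle)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN)
                return -EIO;
        *handle = MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
        return 0;
}

static int example_free_piobuf(struct efx_nic *efx, u32 handle)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);

        MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
        return efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}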
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
+
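Individual capability bits are tested against the FLAGS1 word using their LBN values. A minimal parsing sketch over a raw little-endian response buffer is shown below (illustrative function name; a real driver would normally go through its MCDI helpers instead):

#include <linux/types.h>
#include <linux/bits.h>
#include <asm/unaligned.h>

static bool example_caps_has_tx_tso(const u8 *outbuf)
{
        u32 flags1 = get_unaligned_le32(outbuf +
                        MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST);

        /* TX_TSO occupies bit 21 (its LBN) of FLAGS1 */
        return flags1 & BIT(MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN);
}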
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; the
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of the RX descriptor cache, expressed as a binary logarithm: the
+ * actual size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of the TX descriptor cache, expressed as a binary logarithm: the
+ * actual size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
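Because FLAGS2 and the later V2 fields are absent on older firmware, the response length must be checked before reading past the V1 layout. A minimal sketch of that guard, under the same raw-buffer assumptions as the previous snippets and with an illustrative function name:

#include <linux/types.h>
#include <linux/bits.h>
#include <asm/unaligned.h>

static bool example_v2_caps_has_tx_tso_v2(const u8 *outbuf, size_t outlen)
{
        u32 flags2;

        /* Older firmware stops before FLAGS2; treat missing flags as unset */
        if (outlen < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST +
                     MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN)
                return false;

        flags2 = get_unaligned_le32(outbuf +
                        MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST);
        return flags2 & BIT(MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN);
}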
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
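
Each flag above is described by an _OFST/_LBN/_WIDTH triple: the byte offset of the containing 32-bit word in the response, the least significant bit number of the field inside that word, and the field width in bits. The following is a minimal illustrative sketch of reading one such flag from a raw response buffer; the helper names (mcdi_get_field, caps_v3_has_vxlan_nvgre) are assumptions for this example, and a real driver would use its own MCDI accessor macros and endianness conversion.

#include <stdint.h>
#include <string.h>

/* Illustrative sketch only: pull a bitfield out of a raw MCDI response
 * using the _OFST/_LBN/_WIDTH convention. Assumes a little-endian host
 * or that the caller has already converted the 32-bit words (MCDI
 * payloads are little-endian). Widths used here are all below 32.
 */
static inline uint32_t mcdi_get_field(const uint8_t *buf, unsigned int ofst,
                                      unsigned int lbn, unsigned int width)
{
        uint32_t dword;

        memcpy(&dword, buf + ofst, sizeof(dword));
        return (dword >> lbn) & ((1u << width) - 1);
}

/* Example: test the VXLAN_NVGRE capability bit in FLAGS1. */
static inline int caps_v3_has_vxlan_nvgre(const uint8_t *outbuf)
{
        return mcdi_get_field(outbuf,
                              MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST,
                              MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN,
                              MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH);
}
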
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
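
RXPD_FW_VERSION packs two subfields into a single 16-bit word: a 12-bit REV and a 4-bit TYPE whose values are the RXPD_FW_TYPE_* enums above. A small sketch of splitting it apart, assuming the caller already has the host-endian 16-bit value; the function name is illustrative, not a driver API.

#include <stdint.h>

/* Illustrative only: split the 16-bit RXPD firmware version word read
 * from offset RXPD_FW_VERSION_OFST into its subfields.
 */
static inline void caps_v3_rxpd_fw_version(uint16_t version,
                                           unsigned int *rev,
                                           unsigned int *type)
{
        *rev  = version & 0xfff;        /* REV: LBN 0, WIDTH 12 */
        *type = (version >> 12) & 0xf;  /* TYPE: LBN 12, WIDTH 4 */
}
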
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
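
FLAGS2 and the fields after it only exist when the firmware returns a long enough response, so a caller is expected to compare the actual response length against the field's offset before reading it, as the "check the length" note above says. A minimal sketch, where outlen (the received response length) and the helper name are assumptions:

#include <stddef.h>

/* Illustrative only: FLAGS2 is valid only if the response is long
 * enough to contain it.
 */
static inline int caps_v3_has_flags2(size_t outlen)
{
        return outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST +
                         MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN;
}
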
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
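
PFS_TO_PORTS_ASSIGNMENT is 16 one-byte entries starting at offset 26, with the 0xfc..0xff special values carrying the meanings listed above (and INCOMPATIBLE_ASSIGNMENT treated like PF_NOT_ASSIGNED by current drivers). A sketch of walking the array; the function name and the printf reporting are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: walk the per-PF external-port assignment bytes.
 * INCOMPATIBLE_ASSIGNMENT is treated as "not assigned", as the field
 * description above instructs for current drivers.
 */
static void caps_v3_dump_pf_ports(const uint8_t *outbuf)
{
        const uint8_t *map = outbuf +
                MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST;
        unsigned int pf;

        for (pf = 0; pf < MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM; pf++) {
                switch (map[pf]) {
                case MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED:
                        printf("PF%u: no access\n", pf);
                        break;
                case MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT:
                        printf("PF%u: not present\n", pf);
                        break;
                case MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED:
                case MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT:
                        printf("PF%u: not assigned\n", pf);
                        break;
                default:
                        printf("PF%u: external port %u\n", pf,
                               (unsigned int)map[pf]);
                        break;
                }
        }
}
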
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
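
Since RX_DESC_CACHE_SIZE and TX_DESC_CACHE_SIZE are reported as binary logarithms, the usable value is 2 raised to the reported byte. A one-line sketch with an illustrative function name:

#include <stdint.h>

/* Illustrative only: actual descriptor cache size = 2 ^ reported value.
 * Reported values are small, well below 32.
 */
static inline uint32_t caps_desc_cache_entries(uint8_t log2_size)
{
        return 1u << log2_size;
}
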
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
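
VI_WINDOW_MODE selects how much address space each VI occupies (8 KB, 16 KB or 64 KB per the enums above), which the driver needs in order to locate VI n's registers; the 8 KB mode does not map CTPIO. A hedged sketch of turning the enum into a stride in bytes (the function name is an assumption):

#include <stdint.h>
#include <stddef.h>

/* Illustrative only: map VI_WINDOW_MODE to the per-VI address stride.
 * Returns 0 for unknown/future modes so callers can fail gracefully.
 */
static inline size_t caps_v3_vi_window_stride(uint8_t mode)
{
        switch (mode) {
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
                return 8192;    /* PIO at 4k, CTPIO not mapped */
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
                return 16384;   /* PIO at 4k, CTPIO at 12k */
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
                return 65536;   /* PIO at 4k, CTPIO at 12k */
        default:
                return 0;
        }
}
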
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
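
MAC_STATS_NUM_STATS (new in the V4 response) tells the driver how many 64-bit entries, including the trailing GENERATION_END, a full MAC stats DMA contains; sizing the buffer from it avoids the truncation described above. A minimal sketch, with the function name as an assumption:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: DMA buffer length needed to receive the complete
 * MAC stats array, i.e. MAC_STATS_NUM_STATS 64-bit counters (including
 * the final GENERATION_END entry).
 */
static inline size_t mac_stats_dma_len(uint16_t num_stats)
{
        return (size_t)num_stats * sizeof(uint64_t);
}
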
+
+/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
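/*
 * Editorial sketch, not part of this patch: splitting the 16-bit
 * RXPD_FW_VERSION field above into its REV (bits 0-11) and TYPE
 * (bits 12-15) subfields using the generated _LBN/_WIDTH values.
 * rxpd_fw_version_split() is a hypothetical helper name, not an
 * existing sfc function.
 */
#include <linux/types.h>

static inline void rxpd_fw_version_split(u16 ver, u16 *rev, u8 *type)
{
	*rev = ver &
	       ((1u << MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH) - 1);
	*type = ver >> MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN;
	/* e.g. *type == ..._RXPD_FW_TYPE_FULL_FEATURED for production firmware */
}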
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
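/*
 * Editorial sketch, not part of this patch: how a driver could test one of
 * the FLAGS1/FLAGS2 capability bits above from a raw little-endian MCDI
 * response buffer, using the generated _OFST/_LBN/_WIDTH triplets.
 * mcdi_cap_flag() is a hypothetical helper; get_unaligned_le32() is assumed
 * to come from <asm/unaligned.h>.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static inline u32 mcdi_cap_flag(const u8 *outbuf, unsigned int ofst,
				unsigned int lbn, unsigned int width)
{
	u32 word = get_unaligned_le32(outbuf + ofst);	/* 32-bit flags word */
	u32 mask = width < 32 ? (1u << width) - 1 : ~0u;

	return (word >> lbn) & mask;
}

/*
 * Usage: mcdi_cap_flag(outbuf,
 *			MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST,
 *			MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN,
 *			MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH)
 * returns non-zero when the FATSOv2 capability bit is set.
 */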
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of PF-to-port mapping is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
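/*
 * Editorial sketch, not part of this patch: decoding one entry of the
 * per-PF port assignment array above.  Each of the 16 bytes starting at
 * offset 26 is either an external port number or one of the special
 * values; per the comment, INCOMPATIBLE_ASSIGNMENT is treated like
 * PF_NOT_ASSIGNED.  pf_external_port() is a hypothetical helper name.
 */
#include <linux/types.h>

static inline int pf_external_port(const u8 *outbuf, unsigned int pf)
{
	u8 v;

	if (pf >= MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM)
		return -1;
	v = outbuf[MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];

	switch (v) {
	case MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED:
	case MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT:
	case MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED:
	case MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT:
		return -1;	/* no usable PF-to-port mapping */
	default:
		return v;	/* external port number for this PF */
	}
}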
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
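/*
 * Editorial sketch, not part of this patch: both descriptor cache fields
 * above hold a binary logarithm, so the cache size in descriptors is
 * 1 << value (e.g. a reported value of 6 means a 64-entry cache).
 */
#include <linux/types.h>

static inline u32 desc_cache_entries(u8 log2_size)
{
	return 1u << log2_size;		/* 2 ^ RX/TX_DESC_CACHE_SIZE */
}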
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
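/*
 * Editorial sketch, not part of this patch: converting the VI_WINDOW_MODE
 * enum above into a per-VI stride, with the register offsets described in
 * the comments (PIO at 4k; CTPIO at 12k in the 16k/64k modes, absent in
 * 8k mode).  vi_window_stride() is a hypothetical helper, not an existing
 * sfc function.
 */
#include <linux/types.h>

static inline u32 vi_window_stride(u8 mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K:
		return 8192;	/* PIO at 4k, no CTPIO mapping */
	case MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K:
		return 16384;	/* PIO at 4k, CTPIO at 12k */
	case MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K:
		return 65536;	/* PIO at 4k, CTPIO at 12k */
	default:
		return 0;	/* unknown mode - treat as unsupported */
	}
}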
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
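/*
 * Editorial sketch, not part of this patch: sizing a MAC stats DMA buffer
 * from MAC_STATS_NUM_STATS as described above - room for at least
 * num_stats 64-bit counters (num_stats * 8 bytes), otherwise the returned
 * stats array is truncated.
 */
#include <linux/types.h>

static inline size_t mac_stats_dma_len(u16 num_stats)
{
	return (size_t)num_stats * sizeof(u64);
}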
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of PF-to-port mapping is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
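/*
 * Editorial sketch, not part of this patch: reading one entry of the
 * GUARANTEED_RX_BUFFER_SIZES array above.  Sixteen little-endian 32-bit
 * entries start at offset 84; a zero entry is unused, and an all-zero
 * array means there is no restriction on concurrent RX buffer sizes.
 * The caller must keep idx below ..._GUARANTEED_RX_BUFFER_SIZES_NUM;
 * get_unaligned_le32() is assumed to come from <asm/unaligned.h>.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static inline u32 guaranteed_rx_buffer_size(const u8 *outbuf, unsigned int idx)
{
	unsigned int ofst = MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST +
			    idx * MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN;

	return get_unaligned_le32(outbuf + ofst);	/* 0 => entry not used */
}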
+
+/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+
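Editorial aside (not part of the patch itself): every capability flag above is described by an _OFST/_LBN/_WIDTH triplet, and fields marked "Not present on older firmware (check the length)" must be guarded by the actual response length. A minimal sketch of how a caller might test such a flag, assuming the MCDI response bytes are little-endian and the caller knows the raw response length; mcdi_cap_field() and supports_tso_v2() are hypothetical helpers for illustration, not driver APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Extract a bitfield described by an _OFST/_LBN/_WIDTH triplet from a raw
 * little-endian MCDI response.  Returns 0 if the field lies beyond the end
 * of the response (i.e. the firmware is too old to report it).  Assumes
 * width < 32, which holds for the flag fields defined above.
 */
static uint32_t mcdi_cap_field(const uint8_t *resp, size_t resp_len,
			       unsigned int ofst, unsigned int lbn,
			       unsigned int width)
{
	uint32_t dword;

	if (resp_len < (size_t)ofst + 4)
		return 0;
	dword = (uint32_t)resp[ofst] |
		((uint32_t)resp[ofst + 1] << 8) |
		((uint32_t)resp[ofst + 2] << 16) |
		((uint32_t)resp[ofst + 3] << 24);
	return (dword >> lbn) & ((1u << width) - 1);
}

/* Usage: does this NIC report FATSOv2 support (FLAGS2, byte offset 20, bit 0)? */
static bool supports_tso_v2(const uint8_t *resp, size_t resp_len)
{
	return mcdi_cap_field(resp, resp_len,
			      MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST,
			      MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN,
			      MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH);
}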
+/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156
+
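Editorial aside (not part of the patch itself): array-valued fields such as PFS_TO_PORTS_ASSIGNMENT are laid out as _NUM consecutive elements of _LEN bytes starting at _OFST, with the 0xfc-0xff enum values acting as per-entry sentinels; as the comment above notes, INCOMPATIBLE_ASSIGNMENT should be treated like PF_NOT_ASSIGNED by current drivers. A hypothetical walk over a V8 response, again assuming the caller holds the raw response bytes and length; dump_pf_port_map() is illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void dump_pf_port_map(const uint8_t *resp, size_t resp_len)
{
	unsigned int pf;

	for (pf = 0;
	     pf < MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM;
	     pf++) {
		size_t ofst = MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST +
			      pf * MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN;
		uint8_t port;

		if (ofst + MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN > resp_len)
			break;				/* truncated response */
		port = resp[ofst];
		switch (port) {
		case MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED:
			printf("PF%u: no access\n", pf);
			break;
		case MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT:
			printf("PF%u: not present\n", pf);
			break;
		case MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED:
		case MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT:
			/* old-driver behaviour mandated by the field comment */
			printf("PF%u: not assigned\n", pf);
			break;
		default:
			printf("PF%u: external port %u\n", pf, port);
		}
	}
}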
+/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
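+/* Illustrative sketch (editorial addition, not part of the generated MCDI
+ * definitions): interpreting one byte of PFS_TO_PORTS_ASSIGNMENT using the
+ * special values above. As documented, INCOMPATIBLE_ASSIGNMENT is treated
+ * like PF_NOT_ASSIGNED by current drivers. The helper name and the -1
+ * "no usable port" convention are assumptions for clarity only.
+ */
+static inline int mcdi_pf_external_port(unsigned int assignment)
+{
+        switch (assignment) {
+        case MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED:
+        case MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT:
+        case MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED:
+        case MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT:
+                return -1;      /* no external port number can be reported */
+        default:
+                return assignment;      /* the external port number itself */
+        }
+}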
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2
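+/* Illustrative sketch (editorial addition): deriving the per-VI window
+ * stride, in bytes, from VI_WINDOW_MODE as documented above. The function
+ * name and the 0 return for an unrecognised mode are assumptions for
+ * clarity only.
+ */
+static inline unsigned int mcdi_vi_window_stride(unsigned int vi_window_mode)
+{
+        switch (vi_window_mode) {
+        case MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K:
+                return 8192;    /* PIO at offset 4k; CTPIO not mapped */
+        case MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K:
+                return 16384;   /* PIO at offset 4k; CTPIO at offset 12k */
+        case MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K:
+                return 65536;   /* PIO at offset 4k; CTPIO at offset 12k */
+        default:
+                return 0;       /* unknown mode: make no assumption */
+        }
+}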
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2
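+/* Illustrative sketch (editorial addition): sizing a MAC stats DMA buffer
+ * from MAC_STATS_NUM_STATS as described above (one 64-bit value per entry,
+ * including the final GENERATION_END entry). Assumes size_t is available;
+ * the helper name is an assumption for clarity only.
+ */
+static inline size_t mcdi_mac_stats_dma_len(unsigned int mac_stats_num_stats)
+{
+        /* A shorter buffer is accepted, but the stats array is truncated. */
+        return (size_t)mac_stats_num_stats * 8;
+}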
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4
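+/* Illustrative sketch (editorial addition, not part of the generated MCDI
+ * definitions): testing one of the FLAGS1/FLAGS2/FLAGS3 capability bits
+ * defined above, given the corresponding little-endian flags dword from a
+ * GET_CAPABILITIES_V9 response. Assumes the usual kernel __le32/le32_to_cpu
+ * helpers and bool type; the helper name is illustrative.
+ */
+static inline bool mcdi_v9_cap_flag(__le32 flags_dword, unsigned int lbn)
+{
+        return (le32_to_cpu(flags_dword) >> lbn) & 1;
+}
+
+/* Hypothetical usage, with outbuf a byte pointer to the response payload:
+ *
+ *   __le32 flags2 = *(const __le32 *)(outbuf +
+ *                      MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST);
+ *   bool tso_v2 = mcdi_v9_cap_flag(flags2,
+ *                      MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN);
+ */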
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/* enum: MCDI command directed to or response originating from the MC. */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
+/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
+ * are not defined.
+ */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
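+/* Illustrative sketch (editorial addition): packing the MC_CMD_V2_EXTN
+ * request word from the fields defined above. Assumes the kernel u32 type;
+ * the helper name is an assumption, and the caller must keep each argument
+ * within its _WIDTH.
+ */
+static inline u32 mcdi_v2_extn_hdr(unsigned int extended_cmd,
+                                   unsigned int actual_len,
+                                   unsigned int message_type)
+{
+        return (extended_cmd << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
+               (actual_len << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN) |
+               (message_type << MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN);
+}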
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN
+ * aggregators, this must be one or greater, and the attached v-ports must
+ * have exactly this number of tags. For other v-switch types, this must be
+ * zero or greater, and is an upper limit on the number of VLAN tags for
+ * attached v-ports. An error will be returned if the existing configuration
+ * means we can't support attached v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives traffic which doesn't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4
+/* Size of indirection table to be allocated to this context from the pool.
+ * Must be a power of 2. The minimum and maximum table size can be queried
+ * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in
+ * the common pool to allocate the requested table size, due to allocating
+ * table space to other RSS contexts, then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
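+/* Illustrative sketch (editorial addition): filling the fixed 128-entry
+ * indirection table used by MC_CMD_RSS_CONTEXT_SET_TABLE, spreading entries
+ * round-robin over queues 0..num_queues-1 as permitted by the NUM_QUEUES
+ * description in MC_CMD_RSS_CONTEXT_ALLOC above. Assumes the kernel u8
+ * type; the function name is illustrative and num_queues must be non-zero.
+ */
+static inline void mcdi_rss_fill_default_table(u8 *table,
+                                               unsigned int num_queues)
+{
+        unsigned int i;
+
+        for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
+                table[i] = i % num_queues;      /* one byte per entry */
+}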
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
+ * Write a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
+#undef MC_CMD_0x13e_PRIVILEGE_CTG
+
+#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array of index-value pairs to be written to the table. Structure is
+ * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
+ */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
+/* The index of the table entry to be written. */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
+/* The value to write into the table entry. */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
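+/* Illustrative sketch (editorial addition): packing one index/value pair
+ * into a 32-bit MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY using the _LBN fields
+ * above. Assumes the kernel u32 type; the helper name is illustrative and
+ * both arguments must fit in their 16-bit fields.
+ */
+static inline u32 mcdi_rss_write_table_entry(unsigned int index,
+                                             unsigned int value)
+{
+        return (index << MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN) |
+               (value << MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN);
+}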
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_READ_TABLE
+ * Read a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
+#undef MC_CMD_0x13f_PRIVILEGE_CTG
+
+#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array containing the indices of the entries to be read. */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
+/* A buffer containing the requested entries read from the table. */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
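The FLAGS word above carries both the legacy single-bit _EN flags and the newer 4-bit _MODE fields, and the comment spells out the compatibility rule: only set _MODE fields once ADDITIONAL_RSS_MODES has been confirmed, since older firmware rejects FLAGS > 0xff with EINVAL. A minimal sketch of packing the request from these definitions follows; it is not part of the patch, and the put_le32()/mcdi_field() helpers and the caller-supplied tcp4_mode value are assumptions for illustration.

/*
 * Illustrative sketch only (not part of the patch): building an
 * MC_CMD_RSS_CONTEXT_SET_FLAGS_IN request by hand from the definitions above.
 * MCDI dwords are little-endian; put_le32() is a local stand-in for whatever
 * buffer helpers the caller's environment provides.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static uint32_t mcdi_field(uint32_t val, unsigned int lbn, unsigned int width)
{
	return (val & ((1u << width) - 1u)) << lbn;
}

static void build_rss_set_flags(uint8_t *inbuf, uint32_t rss_context_id,
				uint32_t tcp4_mode /* RSS_MODE bits, caller-chosen */)
{
	uint32_t flags;

	/* Legacy _EN bit: spread TCP/IPv4 even on firmware without
	 * ADDITIONAL_RSS_MODES, which ignores the _MODE fields. */
	flags = mcdi_field(1, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN,
			   MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH);
	/* New-style _MODE field: only set this after MC_CMD_GET_CAPABILITIES
	 * reports ADDITIONAL_RSS_MODES, since older firmware rejects
	 * FLAGS > 0xff with EINVAL. */
	flags |= mcdi_field(tcp4_mode,
			    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN,
			    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH);

	memset(inbuf, 0, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
	put_le32(inbuf + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST,
		 rss_context_id);
	put_le32(inbuf + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST, flags);
}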
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
+
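Reading the flags back follows the precedence rule quoted above: if any _MODE field is non-zero, the _EN bits are to be disregarded. A small sketch under that rule follows; treating a zero _MODE field as "no spreading for that traffic class" is an assumption here, since the authoritative encoding lives in the RSS_MODE structure, which is not part of this excerpt.

/*
 * Illustrative sketch only (not part of the patch): deciding whether RSS
 * spreading is active for TCP/IPv4 from a GET_FLAGS response, following the
 * _MODE vs. _EN precedence described above.
 */
#include <stdint.h>

static uint32_t get_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1u);
}

static int rss_tcp_ipv4_spreading(uint32_t flags)
{
	/* All _MODE fields live at bit 8 and above; if any of them is set,
	 * the _EN bits must be disregarded.  A zero _MODE field is taken
	 * here to mean no spreading for that class (an assumption; see the
	 * RSS_MODE structure for the authoritative encoding). */
	if (flags >> MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN)
		return get_field(flags,
				 MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN,
				 MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH) != 0;

	/* Otherwise fall back to the legacy enable bit. */
	return get_field(flags,
			 MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN,
			 MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH) != 0;
}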
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses of a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6)
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169
+
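Like most variable-length MCDI responses, this one pairs a _LEN(num) macro (bytes needed for num entries) with a _NUM(len) macro (entries that fit in len bytes): 4 bytes of count plus 6 bytes per MAC address, up to 41 addresses (MAXNUM) in a standard 250-byte response. A sketch of walking the array follows; it is not part of the patch, read_le32() is a local stand-in, and outbuf/outlen are assumed to describe a completed response.

/*
 * Illustrative sketch only (not part of the patch): iterating over the
 * variable-length MACADDR array in an MC_CMD_VPORT_GET_MAC_ADDRESSES
 * response.  outbuf/outlen are assumed to hold the completed response.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void print_vport_macs(const uint8_t *outbuf, size_t outlen)
{
	size_t count, room, i;

	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return;

	count = read_le32(outbuf +
			  MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST);
	/* Never trust MACADDR_COUNT beyond what the response length holds. */
	room = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(outlen);
	if (count > room)
		count = room;

	for (i = 0; i < count; i++) {
		const uint8_t *mac = outbuf +
			MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST +
			i * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN;

		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}
}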
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * Read some configuration of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information. PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) Fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses. Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
+/* type of license (eg 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to the challenge, in the form of an ECDSA signature
+ * consisting of two 384-bit integers, r and s, in big-endian order. The
+ * signature signs a SHA-384 digest of a message constructed from the
+ * concatenation of the input message and the remaining fields of this output
+ * message, e.g. challenge[48 bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless of which function is calling; effectively
+ * this is the PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of v-adaptor associated with the client. If no such v-adaptor
+ * exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive a MC reboot, but will be
+ * erased when the adapter is power cycled
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4)
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
+
+/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
+/* Bitmask of engineering port modes available on the board (indexed by
+ * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
+ * contains all modes implemented in firmware for a particular board. Modes
+ * listed in MODES are considered production modes and should be exposed in
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES,
+ * should be considered hidden (not to be exposed in userland tools) and for
+ * engineering use only. There are no other semantic differences and any mode
+ * listed in either MODES or ENGINEERING_MODES can be set on the board.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
+
+
+/***********************************/
+/* MC_CMD_OVERRIDE_PORT_MODE
+ * Override flash config port mode for subsequent MC reboot(s). Override data
+ * is stored in the persistent data section of DMEM and activated on next MC
+ * warm reboot. A cold reboot resets the override. It is assumed that a
+ * sufficient number of PFs are available and that port mapping is valid for
+ * the new port mode, as the override does not affect PF configuration.
+ */
+#define MC_CMD_OVERRIDE_PORT_MODE 0x137
+#undef MC_CMD_0x137_PRIVILEGE_CTG
+
+#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
+/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
+
+/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
+#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
+/* enum: Bug 17230 workaround. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 workaround (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 workaround (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
+
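IMPLEMENTED and ENABLED are parallel bitmasks indexed by the BUG* enums, so a conservative check treats a workaround as active only when its bit is set in both. A short sketch follows, not part of the patch, with read_le32() as an assumed little-endian accessor over the raw response buffer.

/*
 * Illustrative sketch only (not part of the patch): a workaround is treated
 * as active only if its bit is set in both the IMPLEMENTED and ENABLED masks
 * of an MC_CMD_GET_WORKAROUNDS response.
 */
#include <stdint.h>

static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static int workaround_active(const uint8_t *outbuf, uint32_t wa_bit)
{
	uint32_t implemented = read_le32(outbuf +
			MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST);
	uint32_t enabled = read_le32(outbuf +
			MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST);

	return (implemented & enabled & wa_bit) != 0;
}

/* e.g. workaround_active(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) */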
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the source MAC address of TX packets to be set to any
+ * arbitrary MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
+
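The FUNCTION dword packs the PF number into bits 0-15 and the VF number into bits 16-31, with VF_NULL selecting the PF itself; that is how the worked values in the comment (PF 0 = 0xFFFF0000, VF 1,3 = 0x00030001) arise, and NEW_MASK only takes effect when its MSB (DO_CHANGE) is set. A minimal sketch of the encoding follows; it is not part of the patch and the helper names are illustrative only.

/*
 * Illustrative sketch only (not part of the patch): encoding the FUNCTION
 * and NEW_MASK dwords of MC_CMD_PRIVILEGE_MASK_IN.
 */
#include <stdint.h>

static uint32_t privilege_function(uint16_t pf, uint16_t vf_or_null)
{
	return ((uint32_t)pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
	       ((uint32_t)vf_or_null << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
}

static void privilege_mask_example(void)
{
	/* Read-only query of PF 0's mask: without DO_CHANGE set, NEW_MASK is
	 * ignored and the command just returns the existing mask. */
	uint32_t fn_pf0 = privilege_function(0, MC_CMD_PRIVILEGE_MASK_IN_VF_NULL);
	uint32_t read_only_mask = 0;

	/* Set VF 1,3's mask to LINK and PTP: DO_CHANGE (the MSB) must be set
	 * for the new mask to take effect. */
	uint32_t fn_vf13 = privilege_function(1, 3);
	uint32_t new_mask = MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE |
			    MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK |
			    MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP;

	(void)fn_pf0; (void)read_only_mask; (void)fn_vf13; (void)new_mask;
	/* These values would be written little-endian at
	 * MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST and _NEW_MASK_OFST. */
}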
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_ADD
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this only affects checksum validation in VNIC RX - on TX the send descriptor explicitly specifies encapsulation. These rules are per-VNIC, i.e. only apply to the current driver. If a rule matches, then the packet is considered to have the corresponding encapsulation type, and the inner packet is parsed. It is up to the driver to ensure that overlapping rules are not inserted. (If a packet would match multiple rules, a random one of them will be used.) A rule with the exact same match criteria may not be inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are supported, use MC_CMD_GET_PARSER_DISP_INFO with OP OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported combinations. Each driver may only have a limited set of active rules - returns ENOSPC if the caller's table is full.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
+#undef MC_CMD_0x16d_PRIVILEGE_CTG
+
+#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
+/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
+/* Any non-zero bits other than the ones named below or an unsupported
+ * combination will cause the NIC to return EOPNOTSUPP. In the future more
+ * flags may be added.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
+/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
+ * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
+/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
+ * (Deprecated)
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
+/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
+/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
+ * case of IPv4, the IP should be in the first 4 bytes and all other bytes
+ * should be zero.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
+/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
+/* Actions that should be applied to packets that match the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
+/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
+/* Handle to inserted rule. Used for removing the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
+ * Remove a VNIC encapsulation rule. Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
+#undef MC_CMD_0x16e_PRIVILEGE_CTG
+
+#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
+/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
+
+/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
+ * defined in SF-120734-TC with more information in SF-122717-TC.
+ */
+#define FUNCTION_PERSONALITY_LEN 4
+#define FUNCTION_PERSONALITY_ID_OFST 0
+#define FUNCTION_PERSONALITY_ID_LEN 4
+/* enum: Function has no assigned personality */
+#define FUNCTION_PERSONALITY_NULL 0x0
+/* enum: Function has an EF100-style function control window and VI windows
+ * with both EF100 and vDPA doorbells.
+ */
+#define FUNCTION_PERSONALITY_EF100 0x1
+/* enum: Function has virtio net device configuration registers and doorbells
+ * for virtio queue pairs.
+ */
+#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
+/* enum: Function has virtio block device configuration registers and a
+ * doorbell for a single virtqueue.
+ */
+#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
+/* enum: Function is a Xilinx acceleration device - management function */
+#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
+/* enum: Function is a Xilinx acceleration device - user function */
+#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
+#define FUNCTION_PERSONALITY_ID_LBN 0
+#define FUNCTION_PERSONALITY_ID_WIDTH 32
+
+/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID
+ * (interface/PF/VF tuple)
+ */
+#define PCIE_FUNCTION_LEN 8
+/* PCIe PF function number */
+#define PCIE_FUNCTION_PF_OFST 0
+#define PCIE_FUNCTION_PF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define PCIE_FUNCTION_PF_ANY 0xfffe
+/* enum: Value representing invalid (null) function */
+#define PCIE_FUNCTION_PF_NULL 0xffff
+#define PCIE_FUNCTION_PF_LBN 0
+#define PCIE_FUNCTION_PF_WIDTH 16
+/* PCIe VF Function number (PF relative) */
+#define PCIE_FUNCTION_VF_OFST 2
+#define PCIE_FUNCTION_VF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define PCIE_FUNCTION_VF_ANY 0xfffe
+/* enum: Function is a PF (when PF != PF_NULL) or invalid function (when PF ==
+ * PF_NULL)
+ */
+#define PCIE_FUNCTION_VF_NULL 0xffff
+#define PCIE_FUNCTION_VF_LBN 16
+#define PCIE_FUNCTION_VF_WIDTH 16
+/* PCIe interface of the function */
+#define PCIE_FUNCTION_INTF_OFST 4
+#define PCIE_FUNCTION_INTF_LEN 4
+/* enum: Host PCIe interface */
+#define PCIE_FUNCTION_INTF_HOST 0x0
+/* enum: Application Processor interface */
+#define PCIE_FUNCTION_INTF_AP 0x1
+#define PCIE_FUNCTION_INTF_LBN 32
+#define PCIE_FUNCTION_INTF_WIDTH 32
+
+#endif /* MCDI_PCOL_H */
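
The VNIC encapsulation rule commands above are only offset/length definitions; drivers drive them through the generic sfc MCDI helpers. Below is a minimal, hypothetical sketch (not part of this patch) of adding a rule and then removing it by handle, assuming the usual MCDI_DECLARE_BUF/MCDI_SET_DWORD/MCDI_DWORD macros and efx_mcdi_rpc(); the MPORT selector and encap type values are placeholders.

	/* Illustrative only: issue VNIC_ENCAP_RULE_ADD, then remove the rule by handle. */
	static int example_vnic_encap_rule_cycle(struct efx_nic *efx, u32 mport_selector)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN);
		MCDI_DECLARE_BUF(rmbuf, MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN);
		size_t outlen;
		u32 handle;
		int rc;

		/* Unused fields stay zero; a real caller sets one of the MATCH_*
		 * combinations reported by MC_CMD_GET_PARSER_DISP_INFO.
		 */
		MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR, mport_selector);
		MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS, 0);
		MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE, 0); /* assumed "no encap" value */
		rc = efx_mcdi_rpc(efx, MC_CMD_VNIC_ENCAP_RULE_ADD, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		handle = MCDI_DWORD(outbuf, VNIC_ENCAP_RULE_ADD_OUT_HANDLE);

		MCDI_SET_DWORD(rmbuf, VNIC_ENCAP_RULE_REMOVE_IN_HANDLE, handle);
		return efx_mcdi_rpc(efx, MC_CMD_VNIC_ENCAP_RULE_REMOVE, rmbuf, sizeof(rmbuf),
				    NULL, 0, NULL);
	}
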
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 6983799e1c05..138bca611341 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -527,7 +527,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
/* PTP "event" packet */
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
- unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
+ unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
/* There may be existing transmits on the channel that are
* waiting for this packet to trigger the doorbell write.
* We need to send the packets at this point.
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d937af18973e..0c68c7f8056d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1585,7 +1585,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->netdev_ops = &smsc9420_netdev_ops;
dev->ethtool_ops = &smsc9420_ethtool_ops;
- netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_POLL_WEIGHT);
result = register_netdev(dev);
if (result) {
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index 409e82b2018a..876410a256c6 100644
--- a/drivers/net/ethernet/smsc/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -15,7 +15,6 @@
/* interrupt deassertion in multiples of 10us */
#define INT_DEAS_TIME (50)
-#define NAPI_WEIGHT (64)
#define SMSC_BAR (3)
#ifdef __BIG_ENDIAN
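
The smsc9420 hunks above are one instance of a conversion repeated across this series: per-driver 64-packet NAPI weight macros are removed in favour of the core NAPI_POLL_WEIGHT (also 64), so behaviour is unchanged. A minimal sketch of the resulting pattern in a hypothetical driver (the foo_* names are illustrative):

	#include <linux/netdevice.h>

	struct foo_priv {
		struct napi_struct napi;	/* hypothetical private data */
	};

	static int foo_rx_poll(struct napi_struct *napi, int budget)
	{
		int work = 0;	/* packets processed; none in this stub */

		if (work < budget)
			napi_complete_done(napi, work);
		return work;
	}

	static void foo_setup_napi(struct net_device *ndev, struct foo_priv *priv)
	{
		/* NAPI_POLL_WEIGHT is the core default poll budget (64). */
		netif_napi_add(ndev, &priv->napi, foo_rx_poll, NAPI_POLL_WEIGHT);
	}
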
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 84651207a1de..bd52fb7cf486 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -197,9 +197,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp")) {
- /* Binding doc describes the propety:
+ /* Binding doc describes the property:
is required by i.MX8MP.
- is optinoal for i.MX8DXL.
+ is optional for i.MX8DXL.
*/
dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
if (IS_ERR(dwmac->intf_regmap))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2525a80353b7..65ae3ae9582f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3643,11 +3643,9 @@ static int stmmac_open(struct net_device *dev)
u32 chan;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
@@ -5886,11 +5884,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
ret = eth_mac_addr(ndev, addr);
if (ret)
@@ -6220,11 +6216,9 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6565,7 +6559,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
return -ENETDOWN;
if (!stmmac_xdp_is_enabled(priv))
- return -ENXIO;
+ return -EINVAL;
if (queue >= priv->plat->rx_queues_to_use ||
queue >= priv->plat->tx_queues_to_use)
@@ -6576,7 +6570,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
/* EQoS does not have per-DMA channel SW interrupt,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a5d150c5f3d8..9bc625fccca0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -88,11 +88,9 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -156,11 +154,9 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -229,11 +225,9 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
int data = 0;
u32 v;
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
+ data = pm_runtime_resume_and_get(priv->device);
+ if (data < 0)
return data;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
@@ -297,11 +291,9 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
u32 value = MII_BUSY;
u32 v;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
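
The stmmac hunks above, like most of the runtime-PM changes in this series, replace the open-coded pm_runtime_get_sync() error handling with pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails. A minimal sketch of the two equivalent patterns on a generic struct device (illustrative only):

	#include <linux/pm_runtime.h>

	/* Old pattern: pm_runtime_get_sync() keeps the usage count even on failure,
	 * so every error path needed a pm_runtime_put_noidle().
	 */
	static int foo_get_old(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}
		return 0;
	}

	/* New pattern: pm_runtime_resume_and_get() already drops the count on error. */
	static int foo_get_new(struct device *dev)
	{
		int ret = pm_runtime_resume_and_get(dev);

		if (ret < 0)
			return ret;
		return 0;
	}

One behavioural nuance: pm_runtime_get_sync() can also return 1 when the device was already active, while pm_runtime_resume_and_get() returns 0 on success.
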
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index affcf92cd3aa..fb30bc5d56cb 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -94,6 +94,7 @@ config TI_K3_AM65_CPSW_NUSS
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select NET_DEVLINK
select TI_DAVINCI_MDIO
+ select PHYLINK
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 72acdf802258..abc1e4276cf0 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -380,11 +380,9 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
int ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
dev_err(common->dev, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(common->dev);
- }
return ret;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index d2747e9db286..b7ebd741f284 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -173,11 +173,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
if (!vid)
@@ -203,11 +201,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(common->ale, vid,
@@ -557,11 +553,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret, i;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
@@ -1214,11 +1208,9 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
if (ret < 0)
return ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
HOST_PORT_NUM, 0, 0);
@@ -2692,9 +2684,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->bus_freq = clk_get_rate(clk);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2789,11 +2780,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
common = dev_get_drvdata(dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpsw_unregister_devlink(common);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index ebcc6386cc34..aa32dd905e2b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -8,10 +8,12 @@
#include <linux/pm_runtime.h>
#include <linux/time.h>
+#include <net/pkt_cls.h>
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
+#include "cpsw_ale.h"
#define AM65_CPSW_REG_CTL 0x004
#define AM65_CPSW_PN_REG_CTL 0x004
@@ -588,12 +590,190 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
return am65_cpsw_set_taprio(ndev, type_data);
}
+static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct am65_cpsw_qos *qos = &port->qos;
+ struct flow_match_eth_addrs match;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_bc_ratelimit.cookie = cls->cookie;
+ qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_mc_ratelimit.cookie = cls->cookie;
+ qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
+{
+ struct am65_cpsw_qos *qos = &port->qos;
+
+ if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
+ qos->ale_bc_ratelimit.cookie = 0;
+ qos->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
+ }
+
+ if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
+ qos->ale_mc_ratelimit.cookie = 0;
+ qos->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return am65_cpsw_qos_configure_clsflower(port, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return am65_cpsw_qos_delete_clsflower(port, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct am65_cpsw_port *port = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(am65_cpsw_qos_block_cb_list);
+
+static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
+ am65_cpsw_qos_setup_tc_block_cb,
+ port, port, true);
+}
+
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return am65_cpsw_qos_setup_tc_block(ndev, type_data);
default:
return -EOPNOTSUPP;
}
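
The validator above accepts only a pure packet-rate policer: the exceed action must be drop, the conform action pipe or ok, and no byte rate, peak rate, avrate or overhead may be set. A minimal sketch of a flow_action_entry that would pass this check (illustrative; in practice the flower classifier builds the entry, not the driver):

	#include <net/flow_offload.h>

	static void foo_fill_accepted_police(struct flow_action_entry *act)
	{
		act->id = FLOW_ACTION_POLICE;
		act->police.exceed.act_id = FLOW_ACTION_DROP;		/* must be drop */
		act->police.notexceed.act_id = FLOW_ACTION_PIPE;	/* pipe or accept */
		act->police.rate_pkt_ps = 10000;			/* packet rate only */
		/* rate_bytes_ps, peakrate_bytes_ps, avrate and overhead must stay 0,
		 * otherwise the request is rejected with -EOPNOTSUPP.
		 */
	}

The match side of such a rule must be either the broadcast address or the multicast bit (01:00:00:00:00:00 as both key and mask), which selects the per-port BC or MC ALE rate limiter respectively.
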
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index e8f1b6b59e93..fb223b43b196 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -14,11 +14,19 @@ struct am65_cpsw_est {
struct tc_taprio_qopt_offload taprio;
};
+struct am65_cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct am65_cpsw_qos {
struct am65_cpsw_est *est_admin;
struct am65_cpsw_est *est_oper;
ktime_t link_down_time;
int link_speed;
+
+ struct am65_cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct am65_cpsw_ale_ratelimit ale_mc_ratelimit;
};
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 03575c017500..662435e36805 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -335,7 +335,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -756,11 +756,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
int ret;
u32 reg;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
netif_carrier_off(ndev);
@@ -968,11 +966,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
vid = cpsw->slaves[priv->emac_port].port_vlan;
@@ -1052,11 +1048,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
@@ -1090,11 +1084,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
int i;
@@ -1567,11 +1559,9 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto clean_runtime_disable_ret;
- }
ret = cpsw_probe_dt(&cpsw->data, pdev);
if (ret)
@@ -1649,10 +1639,10 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &cpsw->napi_rx,
cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
- CPSW_POLL_WEIGHT);
+ NAPI_POLL_WEIGHT);
netif_tx_napi_add(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
- CPSW_POLL_WEIGHT);
+ NAPI_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, dev);
@@ -1734,11 +1724,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int i, ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 1ef0aaef5c61..231370e9a801 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -50,6 +50,8 @@
/* ALE_AGING_TIMER */
#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+#define ALE_RATE_LIMIT_MIN_PPS 1000
+
/**
* struct ale_entry_fld - The ALE tbl entry field description
* @start_bit: field start bit
@@ -1136,6 +1138,50 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
return tmp & BITMASK(info->bits);
}
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d MC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
static void cpsw_ale_timer(struct timer_list *t)
{
struct cpsw_ale *ale = from_timer(ale, t, timer);
@@ -1199,6 +1245,26 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ unsigned long ale_prescale;
+
+ /* configure Broadcast and Multicast Rate Limit
+ * number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT
+ * ALE_PRESCALE width is 19bit and min value 0x10
+ * port.BCAST/MCAST_LIMIT is 8bit
+ *
+ * To support multi-port configurations the ALE_PRESCALE is set to a 1ms interval,
+ * which allows port.BCAST/MCAST_LIMIT to be configured per port and achieves:
+ * min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1
+ * max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF
+ */
+ ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS;
+ writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE);
+
+ /* Enable MC/BC rate limiting globally.
+ * The actual rate limit is enabled per port via port.BCAST/MCAST_LIMIT.
+ */
+ cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1);
+
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
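
A worked example of the arithmetic in the comment above, assuming a 125 MHz ALE bus clock: ALE_PRESCALE = 125000000 / 1000 = 125000, so one unit of port.BCAST/MCAST_LIMIT corresponds to 1000 pps and requested rates are rounded down to a multiple of 1000. A tiny illustrative helper (not part of the patch) mirroring the rounding in cpsw_ale_rx_ratelimit_bc/mc:

	/* Rate the ALE will actually enforce for a requested pps value.
	 * The clamp reflects the 8-bit BCAST/MCAST_LIMIT field noted above.
	 */
	static unsigned int cpsw_ale_effective_pps(unsigned int requested_pps)
	{
		unsigned int val = requested_pps / 1000;	/* ALE_RATE_LIMIT_MIN_PPS */

		if (val > 255)
			val = 255;
		return val * 1000;	/* e.g. 2500 requested -> 2000 enforced */
	}
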
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 13fe47687fde..aba4572cfa3b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -120,6 +120,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 79e850fe4621..299319320830 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -273,7 +273,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -449,11 +449,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
@@ -498,6 +496,8 @@ static void cpsw_restore(struct cpsw_priv *priv)
/* restore CBS offload */
cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ cpsw_qos_clsflower_resume(priv);
}
static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
@@ -829,11 +829,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
dev_info(priv->dev, "starting ndev. mode: %s\n",
cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
@@ -985,11 +983,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
vid = cpsw->slaves[slave_no].port_vlan;
flags = ALE_VLAN | ALE_SECURE;
@@ -1024,11 +1020,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* reset the return code as pm_runtime_get_sync() can return
* non zero values as well.
@@ -1410,7 +1404,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
cpsw->slaves[i].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1425,11 +1419,11 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
netif_napi_add(ndev, &cpsw->napi_rx,
cpsw->quirk_irq ?
cpsw_rx_poll : cpsw_rx_mq_poll,
- CPSW_POLL_WEIGHT);
+ NAPI_POLL_WEIGHT);
netif_tx_napi_add(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ?
cpsw_tx_poll : cpsw_tx_mq_poll,
- CPSW_POLL_WEIGHT);
+ NAPI_POLL_WEIGHT);
}
napi_ndev = ndev;
@@ -1921,9 +1915,8 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2048,11 +2041,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
cpsw_unregister_notifiers(cpsw);
cpsw_unregister_devlink(cpsw);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 8f6817f346ba..758295c898ac 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -364,7 +364,7 @@ void cpsw_split_res(struct cpsw_common *cpsw)
if (cpsw->tx_ch_num == rlim_ch_num) {
max_rate = consumed_rate;
} else if (!rlim_ch_num) {
- ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
+ ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
bigest_rate = 0;
max_rate = consumed_rate;
} else {
@@ -379,19 +379,19 @@ void cpsw_split_res(struct cpsw_common *cpsw)
if (max_rate < consumed_rate)
max_rate *= 10;
- ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
- ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+ ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
+ ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
(cpsw->tx_ch_num - rlim_ch_num);
bigest_rate = (max_rate - consumed_rate) /
(cpsw->tx_ch_num - rlim_ch_num);
}
/* split tx weight/budget */
- budget = CPSW_POLL_WEIGHT;
+ budget = NAPI_POLL_WEIGHT;
for (i = 0; i < cpsw->tx_ch_num; i++) {
ch_rate = cpdma_chan_get_rate(txv[i].ch);
if (ch_rate) {
- txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+ txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
if (!txv[i].budget)
txv[i].budget++;
if (ch_rate > bigest_rate) {
@@ -417,7 +417,7 @@ void cpsw_split_res(struct cpsw_common *cpsw)
txv[bigest_rate_ch].budget += budget;
/* split rx budget */
- budget = CPSW_POLL_WEIGHT;
+ budget = NAPI_POLL_WEIGHT;
ch_budget = budget / cpsw->rx_ch_num;
for (i = 0; i < cpsw->rx_ch_num; i++) {
cpsw->rxv[i].budget = ch_budget;
@@ -502,6 +502,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.ale_ageout = ale_ageout;
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
ale_params.dev_id = "cpsw";
+ ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;
cpsw->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(cpsw->ale)) {
@@ -754,11 +755,9 @@ int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
return -EINVAL;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
pm_runtime_put(cpsw->dev);
@@ -970,11 +969,9 @@ static int cpsw_set_cbs(struct net_device *ndev,
return -1;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
bw = qopt->enable ? qopt->idleslope : 0;
ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
@@ -1008,11 +1005,9 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
if (mqprio->mode != TC_MQPRIO_MODE_DCB)
return -EINVAL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (num_tc) {
for (i = 0; i < 8; i++) {
@@ -1048,6 +1043,8 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);
+
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
@@ -1058,6 +1055,9 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
case TC_SETUP_QDISC_MQPRIO:
return cpsw_set_mqprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return cpsw_qos_setup_tc_block(ndev, type_data);
+
default:
return -EOPNOTSUPP;
}
@@ -1381,3 +1381,202 @@ drop:
page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
+
+static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+ u32 port_id;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_bc_ratelimit.cookie = cls->cookie;
+ priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_mc_ratelimit.cookie = cls->cookie;
+ priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return cpsw_qos_clsflower_add_policer(priv, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
+ priv->ale_bc_ratelimit.cookie = 0;
+ priv->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
+ }
+
+ if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
+ priv->ale_mc_ratelimit.cookie = 0;
+ priv->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return cpsw_qos_configure_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return cpsw_qos_delete_clsflower(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct cpsw_priv *priv = cb_priv;
+ int ret;
+
+ if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ pm_runtime_put(priv->dev);
+ return ret;
+}
+
+static LIST_HEAD(cpsw_qos_block_cb_list);
+
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
+ cpsw_qos_setup_tc_block_cb,
+ priv, priv, true);
+}
+
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (priv->ale_bc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
+ priv->ale_bc_ratelimit.rate_packet_ps);
+
+ if (priv->ale_mc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
+ priv->ale_mc_ratelimit.rate_packet_ps);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 74555970730c..34230145ca0b 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -89,7 +89,6 @@ do { \
#define CPDMA_TXCP 0x40
#define CPDMA_RXCP 0x60
-#define CPSW_POLL_WEIGHT 64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
#define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN)
#define CPSW_MIN_PACKET_SIZE (ETH_ZLEN)
@@ -364,6 +363,11 @@ struct cpsw_common {
u8 base_mac[ETH_ALEN];
};
+struct cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct cpsw_priv {
struct net_device *ndev;
struct device *dev;
@@ -384,6 +388,8 @@ struct cpsw_priv {
struct cpsw_common *cpsw;
int offload_fwd_mark;
u32 tx_packet_min;
+ struct cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct cpsw_ale_ratelimit ale_mc_ratelimit;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
@@ -411,7 +417,6 @@ struct __aligned(sizeof(long)) cpsw_meta_xdp {
/* The buf includes headroom compatible with both skb and xdpf */
#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
-#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
static inline int cpsw_is_xdpf_handle(void *handle)
{
@@ -462,6 +467,7 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4b6aed78d392..2a3e4e842fa5 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -113,7 +113,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_RX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
-#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
/* Buffer descriptor parameters */
#define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */
@@ -1422,9 +1421,8 @@ static int emac_dev_open(struct net_device *ndev)
struct phy_device *phydev = NULL;
struct device *phy = NULL;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_resume_and_get(&priv->pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, ret);
return ret;
@@ -1661,9 +1659,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
u32 stats_clear_mask;
int err;
- err = pm_runtime_get_sync(&priv->pdev->dev);
+ err = pm_runtime_resume_and_get(&priv->pdev->dev);
if (err < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, err);
return &ndev->stats;
@@ -1951,12 +1948,11 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &ethtool_ops;
- netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, emac_poll, NAPI_POLL_WEIGHT);
pm_runtime_enable(&pdev->dev);
- rc = pm_runtime_get_sync(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
if (rc < 0) {
- pm_runtime_put_noidle(&pdev->dev);
dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, rc);
goto err_napi_del;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index fce2626e34fa..ea3772618043 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -134,11 +134,9 @@ static int davinci_mdio_reset(struct mii_bus *bus)
u32 phy_mask, ver;
int ret;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -232,11 +230,9 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
(phy_id << 16));
@@ -276,11 +272,9 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
(phy_id << 16) | (phy_data & USERACCESS_DATA));
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 16507bff652a..21b0e961eab5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -24,7 +24,6 @@
#include "netcp.h"
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
-#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
@@ -2096,8 +2095,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
}
/* NAPI register */
- netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
- netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
+ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NAPI_POLL_WEIGHT);
/* Register the network device */
ndev->dev_id = 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f47b8358669d..c09cd961edbb 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2270,7 +2270,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
netif_napi_add(netdev, &card->napi,
- spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
+ spider_net_poll, NAPI_POLL_WEIGHT);
spider_net_setup_netdev_ops(netdev);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 05b1a0736835..51948e2b3a34 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -44,7 +44,6 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_RX_CSUM_DEFAULT 1
#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
-#define SPIDER_NET_NAPI_WEIGHT 64
#define SPIDER_NET_FIRMWARE_SEQS 6
#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index be2b992f24d9..ff0c102cb578 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2846,8 +2846,7 @@ static int velocity_probe(struct device *dev, int irq,
netdev->netdev_ops = &velocity_netdev_ops;
netdev->ethtool_ops = &velocity_ethtool_ops;
- netif_napi_add(netdev, &vptr->napi, velocity_poll,
- VELOCITY_NAPI_WEIGHT);
+ netif_napi_add(netdev, &vptr->napi, velocity_poll, NAPI_POLL_WEIGHT);
netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX;
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index d3f960cc7c6e..c02a9654dce6 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -23,7 +23,6 @@
#define VELOCITY_VERSION "1.15"
#define VELOCITY_IO_SIZE 256
-#define VELOCITY_NAPI_WEIGHT 64
#define PKT_BUF_SZ 1540
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 869e362e09c1..3f6b9dfca095 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1515,7 +1515,7 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
return PTR_ERR(lp->sdma_regs);
}
- if (of_get_property(dma_np, "little-endian", NULL)) {
+ if (of_property_read_bool(dma_np, "little-endian")) {
lp->dma_in = temac_dma_in32_le;
lp->dma_out = temac_dma_out32_le;
} else {
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index d770b3ac3f74..016a9c4f2c6c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
+/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@xilinx.com>.
*
- * 2007 - 2013 (c) Xilinx, Inc.
+ * Copyright (c) 2007 - 2013 Xilinx, Inc.
*/
#include <linux/module.h>
@@ -91,13 +90,7 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
-#define ALIGNMENT 4
-
-/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -124,7 +117,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +125,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +136,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -207,7 +198,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
const u16 *from_u16_ptr;
u32 align_buffer;
@@ -265,7 +256,7 @@ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -330,7 +321,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -346,8 +336,9 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -423,7 +414,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -433,23 +423,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -599,11 +591,10 @@ static void xemaclite_rx_handler(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
- unsigned int align;
u32 len;
len = ETH_FRAME_LEN + ETH_FCS_LEN;
- skb = netdev_alloc_skb(dev, len + ALIGNMENT);
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
if (!skb) {
/* Couldn't get memory. */
dev->stats.rx_dropped++;
@@ -611,16 +602,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /* A new skb should have the data halfword aligned, but this code is
- * here just in case that isn't true. Calculate how many
- * bytes we should reserve to get the data to start on a word
- * boundary
- */
- align = BUFFER_ALIGN(skb->data);
- if (align)
- skb_reserve(skb, align);
-
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
@@ -671,8 +653,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -682,8 +663,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the second buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -846,6 +826,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
}
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
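The emaclite RX path above drops its hand-rolled ALIGNMENT/BUFFER_ALIGN() reservation: netdev_alloc_skb() already returns suitably aligned buffers, so reserving NET_IP_ALIGN bytes is all that is needed to keep the IP header aligned. A minimal sketch of the resulting allocation, with foo_ names standing in for the driver's own:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *foo_alloc_rx_skb(struct net_device *dev)
{
	unsigned int len = ETH_FRAME_LEN + ETH_FCS_LEN;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	/* Offset the data so the IP header lands on a 4-byte boundary. */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}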
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 1f382777aa5a..9abbdb71e629 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -271,7 +271,7 @@ static int ptp_ixp_probe(struct platform_device *pdev)
ixp_clock.master_irq = platform_get_irq(pdev, 0);
ixp_clock.slave_irq = platform_get_irq(pdev, 1);
if (IS_ERR(ixp_clock.regs) ||
- !ixp_clock.master_irq || !ixp_clock.slave_irq)
+ ixp_clock.master_irq < 0 || ixp_clock.slave_irq < 0)
return -ENXIO;
ixp_clock.caps = ptp_ixp_caps;
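The ptp_ixp46x fix reflects that platform_get_irq() returns a negative errno on failure rather than 0, so testing for "!irq" misses real errors (including -EPROBE_DEFER). A hedged probe-time sketch of the correct check:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate -EPROBE_DEFER, -ENXIO, ... */

	/* ... request_irq(irq, ...) and the rest of probe ... */
	return 0;
}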
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7db6c135ac6c..2495a5719e1c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -533,14 +533,16 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
}
}
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
type = gh->proto_type;
+ if (likely(type == htons(ETH_P_TEB)))
+ return call_gro_receive(eth_gro_receive, head, skb);
ptype = gro_find_receive_by_type(type);
if (!ptype)
goto out;
- skb_gro_pull(skb, gh_len);
- skb_gro_postpull_rcsum(skb, gh, gh_len);
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
@@ -563,6 +565,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
gh_len = geneve_hlen(gh);
type = gh->proto_type;
+ /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */
+ if (likely(type == htons(ETH_P_TEB)))
+ return eth_gro_complete(skb, nhoff + gh_len);
+
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
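For geneve the common case is an inner Ethernet frame (ETH_P_TEB), so the GRO hooks now pull the tunnel header first and hand TEB straight to eth_gro_receive()/eth_gro_complete() instead of going through the protocol-type lookup. A sketch of the receive-side ordering only, assuming 'gh' and 'gh_len' are the already-validated geneve header and its length as in the hunk above:

#include <linux/etherdevice.h>
#include <net/geneve.h>
#include <net/gro.h>

static struct sk_buff *foo_gro_receive_inner(struct list_head *head,
					     struct sk_buff *skb,
					     struct genevehdr *gh,
					     unsigned int gh_len)
{
	skb_gro_pull(skb, gh_len);		  /* consume the tunnel header */
	skb_gro_postpull_rcsum(skb, gh, gh_len);  /* keep checksum state sane  */

	if (likely(gh->proto_type == htons(ETH_P_TEB)))
		return call_gro_receive(eth_gro_receive, head, skb);

	return NULL;	/* caller falls back to gro_find_receive_by_type() */
}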
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 441da03c23ee..a9c44f08199d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -45,40 +45,6 @@ config BPQETHER
useful if some other computer on your local network has a direct
amateur radio connection.
-config DMASCC
- tristate "High-speed (DMA) SCC driver for AX.25"
- depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
- depends on VIRT_TO_BUS
- help
- This is a driver for high-speed SCC boards, i.e. those supporting
- DMA on one port. You usually use those boards to connect your
- computer to an amateur radio modem (such as the WA4DSY 56kbps
- modem), in order to send and receive AX.25 packet radio network
- traffic.
-
- Currently, this driver supports Ottawa PI/PI2, Paccomm/Gracilis
- PackeTwin, and S5SCC/DMA boards. They are detected automatically.
- If you have one of these cards, say Y here and read the AX25-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- This driver can operate multiple boards simultaneously. If you
- compile it as a module (by saying M instead of Y), it will be called
- dmascc. If you don't pass any parameter to the driver, all
- possible I/O addresses are probed. This could irritate other devices
- that are currently not in use. You may specify the list of addresses
- to be probed by "dmascc.io=addr1,addr2,..." (when compiled into the
- kernel image) or "io=addr1,addr2,..." (when loaded as a module). The
- network interfaces will be called dmascc0 and dmascc1 for the board
- detected first, dmascc2 and dmascc3 for the second one, and so on.
-
- Before you configure each interface with ifconfig, you MUST set
- certain parameters, such as channel access timing, clock mode, and
- DMA channel. This is accomplished with a small utility program,
- dmascc_cfg, available at
- <http://www.linux-ax25.org/wiki/Ax25-tools>. Please be sure to
- get at least version 1.27 of dmascc_cfg, as older versions will not
- work with the current driver.
-
config SCC
tristate "Z8530 SCC driver"
depends on ISA && AX25 && ISA_DMA_API
diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile
index 7a1518d763e3..25fc400369ba 100644
--- a/drivers/net/hamradio/Makefile
+++ b/drivers/net/hamradio/Makefile
@@ -11,7 +11,6 @@
# Christoph Hellwig <hch@infradead.org>
#
-obj-$(CONFIG_DMASCC) += dmascc.o
obj-$(CONFIG_SCC) += scc.o
obj-$(CONFIG_MKISS) += mkiss.o
obj-$(CONFIG_6PACK) += 6pack.o
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
deleted file mode 100644
index a2a12208e3ad..000000000000
--- a/drivers/net/hamradio/dmascc.c
+++ /dev/null
@@ -1,1450 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for high-speed SCC boards (those with DMA support)
- * Copyright (C) 1997-2000 Klaus Kudielka
- *
- * S5SCC/DMA support by Janko Koleznik S52HI
- */
-
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/rtnetlink.h>
-#include <linux/sockios.h>
-#include <linux/workqueue.h>
-#include <linux/atomic.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <linux/jiffies.h>
-#include <net/ax25.h>
-#include "z8530.h"
-
-
-/* Number of buffers per channel */
-
-#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
-#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
-#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
-
-
-/* Cards supported */
-
-#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
- 0, 8, 1843200, 3686400 }
-#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
- 0, 8, 3686400, 7372800 }
-#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
- 0, 4, 6144000, 6144000 }
-#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
- 0, 8, 4915200, 9830400 }
-
-#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
-
-#define TMR_0_HZ 25600 /* Frequency of timer 0 */
-
-#define TYPE_PI 0
-#define TYPE_PI2 1
-#define TYPE_TWIN 2
-#define TYPE_S5 3
-#define NUM_TYPES 4
-
-#define MAX_NUM_DEVS 32
-
-
-/* SCC chips supported */
-
-#define Z8530 0
-#define Z85C30 1
-#define Z85230 2
-
-#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
-
-
-/* I/O registers */
-
-/* 8530 registers relative to card base */
-#define SCCB_CMD 0x00
-#define SCCB_DATA 0x01
-#define SCCA_CMD 0x02
-#define SCCA_DATA 0x03
-
-/* 8253/8254 registers relative to card base */
-#define TMR_CNT0 0x00
-#define TMR_CNT1 0x01
-#define TMR_CNT2 0x02
-#define TMR_CTRL 0x03
-
-/* Additional PI/PI2 registers relative to card base */
-#define PI_DREQ_MASK 0x04
-
-/* Additional PackeTwin registers relative to card base */
-#define TWIN_INT_REG 0x08
-#define TWIN_CLR_TMR1 0x09
-#define TWIN_CLR_TMR2 0x0a
-#define TWIN_SPARE_1 0x0b
-#define TWIN_DMA_CFG 0x08
-#define TWIN_SERIAL_CFG 0x09
-#define TWIN_DMA_CLR_FF 0x0a
-#define TWIN_SPARE_2 0x0b
-
-
-/* PackeTwin I/O register values */
-
-/* INT_REG */
-#define TWIN_SCC_MSK 0x01
-#define TWIN_TMR1_MSK 0x02
-#define TWIN_TMR2_MSK 0x04
-#define TWIN_INT_MSK 0x07
-
-/* SERIAL_CFG */
-#define TWIN_DTRA_ON 0x01
-#define TWIN_DTRB_ON 0x02
-#define TWIN_EXTCLKA 0x04
-#define TWIN_EXTCLKB 0x08
-#define TWIN_LOOPA_ON 0x10
-#define TWIN_LOOPB_ON 0x20
-#define TWIN_EI 0x80
-
-/* DMA_CFG */
-#define TWIN_DMA_HDX_T1 0x08
-#define TWIN_DMA_HDX_R1 0x0a
-#define TWIN_DMA_HDX_T3 0x14
-#define TWIN_DMA_HDX_R3 0x16
-#define TWIN_DMA_FDX_T3R1 0x1b
-#define TWIN_DMA_FDX_T1R3 0x1d
-
-
-/* Status values */
-
-#define IDLE 0
-#define TX_HEAD 1
-#define TX_DATA 2
-#define TX_PAUSE 3
-#define TX_TAIL 4
-#define RTS_OFF 5
-#define WAIT 6
-#define DCD_ON 7
-#define RX_ON 8
-#define DCD_OFF 9
-
-
-/* Ioctls */
-
-#define SIOCGSCCPARAM SIOCDEVPRIVATE
-#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
-
-
-/* Data types */
-
-struct scc_param {
- int pclk_hz; /* frequency of BRG input (don't change) */
- int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
- int nrzi; /* 0 (nrz), 1 (nrzi) */
- int clocks; /* see dmascc_cfg documentation */
- int txdelay; /* [1/TMR_0_HZ] */
- int txtimeout; /* [1/HZ] */
- int txtail; /* [1/TMR_0_HZ] */
- int waittime; /* [1/TMR_0_HZ] */
- int slottime; /* [1/TMR_0_HZ] */
- int persist; /* 1 ... 256 */
- int dma; /* -1 (disable), 0, 1, 3 */
- int txpause; /* [1/TMR_0_HZ] */
- int rtsoff; /* [1/TMR_0_HZ] */
- int dcdon; /* [1/TMR_0_HZ] */
- int dcdoff; /* [1/TMR_0_HZ] */
-};
-
-struct scc_hardware {
- char *name;
- int io_region;
- int io_delta;
- int io_size;
- int num_devs;
- int scc_offset;
- int tmr_offset;
- int tmr_hz;
- int pclk_hz;
-};
-
-struct scc_priv {
- int type;
- int chip;
- struct net_device *dev;
- struct scc_info *info;
-
- int channel;
- int card_base, scc_cmd, scc_data;
- int tmr_cnt, tmr_ctrl, tmr_mode;
- struct scc_param param;
- char rx_buf[NUM_RX_BUF][BUF_SIZE];
- int rx_len[NUM_RX_BUF];
- int rx_ptr;
- struct work_struct rx_work;
- int rx_head, rx_tail, rx_count;
- int rx_over;
- char tx_buf[NUM_TX_BUF][BUF_SIZE];
- int tx_len[NUM_TX_BUF];
- int tx_ptr;
- int tx_head, tx_tail, tx_count;
- int state;
- unsigned long tx_start;
- int rr0;
- spinlock_t *register_lock; /* Per scc_info */
- spinlock_t ring_lock;
-};
-
-struct scc_info {
- int irq_used;
- int twin_serial_cfg;
- struct net_device *dev[2];
- struct scc_priv priv[2];
- struct scc_info *next;
- spinlock_t register_lock; /* Per device register lock */
-};
-
-
-/* Function declarations */
-static int setup_adapter(int card_base, int type, int n) __init;
-
-static void write_scc(struct scc_priv *priv, int reg, int val);
-static void write_scc_data(struct scc_priv *priv, int val, int fast);
-static int read_scc(struct scc_priv *priv, int reg);
-static int read_scc_data(struct scc_priv *priv);
-
-static int scc_open(struct net_device *dev);
-static int scc_close(struct net_device *dev);
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
-static int scc_set_mac_address(struct net_device *dev, void *sa);
-
-static inline void tx_on(struct scc_priv *priv);
-static inline void rx_on(struct scc_priv *priv);
-static inline void rx_off(struct scc_priv *priv);
-static void start_timer(struct scc_priv *priv, int t, int r15);
-static inline unsigned char random(void);
-
-static inline void z8530_isr(struct scc_info *info);
-static irqreturn_t scc_isr(int irq, void *dev_id);
-static void rx_isr(struct scc_priv *priv);
-static void special_condition(struct scc_priv *priv, int rc);
-static void rx_bh(struct work_struct *);
-static void tx_isr(struct scc_priv *priv);
-static void es_isr(struct scc_priv *priv);
-static void tm_isr(struct scc_priv *priv);
-
-
-/* Initialization variables */
-
-static int io[MAX_NUM_DEVS] __initdata = { 0, };
-
-/* Beware! hw[] is also used in dmascc_exit(). */
-static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
-
-
-/* Global variables */
-
-static struct scc_info *first;
-static unsigned long rand;
-
-
-MODULE_AUTHOR("Klaus Kudielka");
-MODULE_DESCRIPTION("Driver for high-speed SCC boards");
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_LICENSE("GPL");
-
-static void __exit dmascc_exit(void)
-{
- int i;
- struct scc_info *info;
-
- while (first) {
- info = first;
-
- /* Unregister devices */
- for (i = 0; i < 2; i++)
- unregister_netdev(info->dev[i]);
-
- /* Reset board */
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- release_region(info->dev[0]->base_addr,
- hw[info->priv[0].type].io_size);
-
- for (i = 0; i < 2; i++)
- free_netdev(info->dev[i]);
-
- /* Free memory */
- first = info->next;
- kfree(info);
- }
-}
-
-static int __init dmascc_init(void)
-{
- int h, i, j, n;
- int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
- t1[MAX_NUM_DEVS];
- unsigned t_val;
- unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
- counting[MAX_NUM_DEVS];
-
- /* Initialize random number generator */
- rand = jiffies;
- /* Cards found = 0 */
- n = 0;
- /* Warning message */
- if (!io[0])
- printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
-
- /* Run autodetection for each card type */
- for (h = 0; h < NUM_TYPES; h++) {
-
- if (io[0]) {
- /* User-specified I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- base[i] = 0;
- for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
- j = (io[i] -
- hw[h].io_region) / hw[h].io_delta;
- if (j >= 0 && j < hw[h].num_devs &&
- hw[h].io_region +
- j * hw[h].io_delta == io[i]) {
- base[j] = io[i];
- }
- }
- } else {
- /* Default I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++) {
- base[i] =
- hw[h].io_region + i * hw[h].io_delta;
- }
- }
-
- /* Check valid I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if (!request_region
- (base[i], hw[h].io_size, "dmascc"))
- base[i] = 0;
- else {
- tcmd[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CTRL;
- t0[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT0;
- t1[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT1;
- }
- }
-
- /* Start timers */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
- outb(0x36, tcmd[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
- t0[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
- t0[i]);
- /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
- outb(0x70, tcmd[i]);
- outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
- outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
- start[i] = jiffies;
- delay[i] = 0;
- counting[i] = 1;
- /* Timer 2: LSB+MSB, Mode 0 */
- outb(0xb0, tcmd[i]);
- }
- time = jiffies;
- /* Wait until counter registers are loaded */
- udelay(2000000 / TMR_0_HZ);
-
- /* Timing loop */
- while (time_is_after_jiffies(time + 13)) {
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i] && counting[i]) {
- /* Read back Timer 1: latch; read LSB; read MSB */
- outb(0x40, tcmd[i]);
- t_val =
- inb(t1[i]) + (inb(t1[i]) << 8);
- /* Also check whether counter did wrap */
- if (t_val == 0 ||
- t_val > TMR_0_HZ / HZ * 10)
- counting[i] = 0;
- delay[i] = jiffies - start[i];
- }
- }
-
- /* Evaluate measurements */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if ((delay[i] >= 9 && delay[i] <= 11) &&
- /* Ok, we have found an adapter */
- (setup_adapter(base[i], h, n) == 0))
- n++;
- else
- release_region(base[i],
- hw[h].io_size);
- }
-
- } /* NUM_TYPES */
-
- /* If any adapter was successfully initialized, return ok */
- if (n)
- return 0;
-
- /* If no adapter found, return error */
- printk(KERN_INFO "dmascc: no adapters found\n");
- return -EIO;
-}
-
-module_init(dmascc_init);
-module_exit(dmascc_exit);
-
-static void __init dev_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_AX25;
- dev->hard_header_len = AX25_MAX_HEADER_LEN;
- dev->mtu = 1500;
- dev->addr_len = AX25_ADDR_LEN;
- dev->tx_queue_len = 64;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- dev_addr_set(dev, (u8 *)&ax25_defaddr);
-}
-
-static const struct net_device_ops scc_netdev_ops = {
- .ndo_open = scc_open,
- .ndo_stop = scc_close,
- .ndo_start_xmit = scc_send_packet,
- .ndo_siocdevprivate = scc_siocdevprivate,
- .ndo_set_mac_address = scc_set_mac_address,
-};
-
-static int __init setup_adapter(int card_base, int type, int n)
-{
- int i, irq, chip, err;
- struct scc_info *info;
- struct net_device *dev;
- struct scc_priv *priv;
- unsigned long time;
- unsigned int irqs;
- int tmr_base = card_base + hw[type].tmr_offset;
- int scc_base = card_base + hw[type].scc_offset;
- char *chipnames[] = CHIPNAMES;
-
- /* Initialize what is necessary for write_scc and write_scc_data */
- info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[0]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out1;
- }
-
- info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[1]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out2;
- }
- spin_lock_init(&info->register_lock);
-
- priv = &info->priv[0];
- priv->type = type;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + SCCA_CMD;
- priv->scc_data = scc_base + SCCA_DATA;
- priv->register_lock = &info->register_lock;
-
- /* Reset SCC */
- write_scc(priv, R9, FHWRES | MIE | NV);
-
- /* Determine type of chip by enabling SDLC/HDLC enhancements */
- write_scc(priv, R15, SHDLCE);
- if (!read_scc(priv, R15)) {
- /* WR7' not present. This is an ordinary Z8530 SCC. */
- chip = Z8530;
- } else {
- /* Put one character in TX FIFO */
- write_scc_data(priv, 0, 0);
- if (read_scc(priv, R0) & Tx_BUF_EMP) {
- /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
- chip = Z85230;
- } else {
- /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
- chip = Z85C30;
- }
- }
- write_scc(priv, R15, 0);
-
- /* Start IRQ auto-detection */
- irqs = probe_irq_on();
-
- /* Enable interrupts */
- if (type == TYPE_TWIN) {
- outb(0, card_base + TWIN_DMA_CFG);
- inb(card_base + TWIN_CLR_TMR1);
- inb(card_base + TWIN_CLR_TMR2);
- info->twin_serial_cfg = TWIN_EI;
- outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
- } else {
- write_scc(priv, R15, CTSIE);
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R1, EXT_INT_ENAB);
- }
-
- /* Start timer */
- outb(1, tmr_base + TMR_CNT1);
- outb(0, tmr_base + TMR_CNT1);
-
- /* Wait and detect IRQ */
- time = jiffies;
- while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ));
- irq = probe_irq_off(irqs);
-
- /* Clear pending interrupt, disable interrupts */
- if (type == TYPE_TWIN) {
- inb(card_base + TWIN_CLR_TMR1);
- } else {
- write_scc(priv, R1, 0);
- write_scc(priv, R15, 0);
- write_scc(priv, R0, RES_EXT_INT);
- }
-
- if (irq <= 0) {
- printk(KERN_ERR
- "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
- hw[type].name, card_base, irq);
- err = -ENODEV;
- goto out3;
- }
-
- /* Set up data structures */
- for (i = 0; i < 2; i++) {
- dev = info->dev[i];
- priv = &info->priv[i];
- priv->type = type;
- priv->chip = chip;
- priv->dev = dev;
- priv->info = info;
- priv->channel = i;
- spin_lock_init(&priv->ring_lock);
- priv->register_lock = &info->register_lock;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
- priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
- priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
- priv->tmr_ctrl = tmr_base + TMR_CTRL;
- priv->tmr_mode = i ? 0xb0 : 0x70;
- priv->param.pclk_hz = hw[type].pclk_hz;
- priv->param.brg_tc = -1;
- priv->param.clocks = TCTRxCP | RCRTxCP;
- priv->param.persist = 256;
- priv->param.dma = -1;
- INIT_WORK(&priv->rx_work, rx_bh);
- dev->ml_priv = priv;
- snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
- dev->base_addr = card_base;
- dev->irq = irq;
- dev->netdev_ops = &scc_netdev_ops;
- dev->header_ops = &ax25_header_ops;
- }
- if (register_netdev(info->dev[0])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[0]->name);
- err = -ENODEV;
- goto out3;
- }
- if (register_netdev(info->dev[1])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[1]->name);
- err = -ENODEV;
- goto out4;
- }
-
-
- info->next = first;
- first = info;
- printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
- hw[type].name, chipnames[chip], card_base, irq);
- return 0;
-
- out4:
- unregister_netdev(info->dev[0]);
- out3:
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- free_netdev(info->dev[1]);
- out2:
- free_netdev(info->dev[0]);
- out1:
- kfree(info);
- out:
- return err;
-}
-
-
-/* Driver functions */
-
-static void write_scc(struct scc_priv *priv, int reg, int val)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- outb(val, priv->scc_cmd);
- return;
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- return;
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return;
- }
-}
-
-
-static void write_scc_data(struct scc_priv *priv, int val, int fast)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- outb(val, priv->scc_data);
- return;
- case TYPE_TWIN:
- outb_p(val, priv->scc_data);
- return;
- default:
- if (fast)
- outb_p(val, priv->scc_data);
- else {
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- outb_p(val, priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- }
- return;
- }
-}
-
-
-static int read_scc(struct scc_priv *priv, int reg)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- return inb(priv->scc_cmd);
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- return inb_p(priv->scc_cmd);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- rc = inb_p(priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int read_scc_data(struct scc_priv *priv)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- return inb(priv->scc_data);
- case TYPE_TWIN:
- return inb_p(priv->scc_data);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- rc = inb_p(priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int scc_open(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- /* Request IRQ if not already used by other channel */
- if (!info->irq_used) {
- if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
- return -EAGAIN;
- }
- }
- info->irq_used++;
-
- /* Request DMA if required */
- if (priv->param.dma >= 0) {
- if (request_dma(priv->param.dma, "dmascc")) {
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
- return -EAGAIN;
- } else {
- unsigned long flags = claim_dma_lock();
- clear_dma_ff(priv->param.dma);
- release_dma_lock(flags);
- }
- }
-
- /* Initialize local variables */
- priv->rx_ptr = 0;
- priv->rx_over = 0;
- priv->rx_head = priv->rx_tail = priv->rx_count = 0;
- priv->state = IDLE;
- priv->tx_head = priv->tx_tail = priv->tx_count = 0;
- priv->tx_ptr = 0;
-
- /* Reset channel */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- /* X1 clock, SDLC mode */
- write_scc(priv, R4, SDLC | X1CLK);
- /* DMA */
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* 8 bit RX char, RX disable */
- write_scc(priv, R3, Rx8);
- /* 8 bit TX char, TX disable */
- write_scc(priv, R5, Tx8);
- /* SDLC address field */
- write_scc(priv, R6, 0);
- /* SDLC flag */
- write_scc(priv, R7, FLAG);
- switch (priv->chip) {
- case Z85C30:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* Auto EOM reset */
- write_scc(priv, R7, AUTOEOM);
- write_scc(priv, R15, 0);
- break;
- case Z85230:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* The following bits are set (see 2.5.2.1):
- - Automatic EOM reset
- - Interrupt request if RX FIFO is half full
- This bit should be ignored in DMA mode (according to the
- documentation), but actually isn't. The receiver doesn't work if
- it is set. Thus, we have to clear it in DMA mode.
- - Interrupt/DMA request if TX FIFO is completely empty
- a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
- compatibility).
- b) If cleared, DMA requests may follow each other very quickly,
- filling up the TX FIFO.
- Advantage: TX works even in case of high bus latency.
- Disadvantage: Edge-triggered DMA request circuitry may miss
- a request. No more data is delivered, resulting
- in a TX FIFO underrun.
- Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
- The PackeTwin doesn't. I don't know about the PI, but let's
- assume it behaves like the PI2.
- */
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- write_scc(priv, R7, AUTOEOM | TXFIFOE);
- else
- write_scc(priv, R7, AUTOEOM);
- } else {
- write_scc(priv, R7, AUTOEOM | RXFIFOH);
- }
- write_scc(priv, R15, 0);
- break;
- }
- /* Preset CRC, NRZ(I) encoding */
- write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
-
- /* Configure baud rate generator */
- if (priv->param.brg_tc >= 0) {
- /* Program BR generator */
- write_scc(priv, R12, priv->param.brg_tc & 0xFF);
- write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
- /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
- PackeTwin, not connected on the PI2); set DPLL source to BRG */
- write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
- /* Enable DPLL */
- write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
- } else {
- /* Disable BR generator */
- write_scc(priv, R14, DTRREQ | BRSRC);
- }
-
- /* Configure clocks */
- if (priv->type == TYPE_TWIN) {
- /* Disable external TX clock receiver */
- outb((info->twin_serial_cfg &=
- ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
- write_scc(priv, R11, priv->param.clocks);
- if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
- /* Enable external TX clock receiver */
- outb((info->twin_serial_cfg |=
- (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Configure PackeTwin */
- if (priv->type == TYPE_TWIN) {
- /* Assert DTR, enable interrupts */
- outb((info->twin_serial_cfg |= TWIN_EI |
- (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Read current status */
- priv->rr0 = read_scc(priv, R0);
- /* Enable DCD interrupt */
- write_scc(priv, R15, DCDIE);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-
-static int scc_close(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- netif_stop_queue(dev);
-
- if (priv->type == TYPE_TWIN) {
- /* Drop DTR */
- outb((info->twin_serial_cfg &=
- (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Reset channel, free DMA and IRQ */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- outb(0, card_base + TWIN_DMA_CFG);
- free_dma(priv->param.dma);
- }
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
-
- return 0;
-}
-
-
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
-{
- struct scc_priv *priv = dev->ml_priv;
-
- switch (cmd) {
- case SIOCGSCCPARAM:
- if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- case SIOCSSCCPARAM:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (netif_running(dev))
- return -EAGAIN;
- if (copy_from_user(&priv->param, data,
- sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- unsigned long flags;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP))
- return ax25_ip_xmit(skb);
-
- /* Temporarily stop the scheduler feeding us packets */
- netif_stop_queue(dev);
-
- /* Transfer data to DMA buffer */
- i = priv->tx_head;
- skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
- priv->tx_len[i] = skb->len - 1;
-
- /* Clear interrupts while we touch our circular buffers */
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move the ring buffer's head */
- priv->tx_head = (i + 1) % NUM_TX_BUF;
- priv->tx_count++;
-
- /* If we just filled up the last buffer, leave queue stopped.
- The higher layers must wait until we have a DMA buffer
- to accept the data. */
- if (priv->tx_count < NUM_TX_BUF)
- netif_wake_queue(dev);
-
- /* Set new TX state */
- if (priv->state == IDLE) {
- /* Assert RTS, start timer */
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- }
-
- /* Turn interrupts back on and free buffer */
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-
-static int scc_set_mac_address(struct net_device *dev, void *sa)
-{
- dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
- return 0;
-}
-
-
-static inline void tx_on(struct scc_priv *priv)
-{
- int i, n;
- unsigned long flags;
-
- if (priv->param.dma >= 0) {
- n = (priv->chip == Z85230) ? 3 : 1;
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
- set_dma_count(priv->param.dma,
- priv->tx_len[priv->tx_tail] - n);
- release_dma_lock(flags);
- /* Enable TX underrun interrupt */
- write_scc(priv, R15, TxUIE);
- /* Configure DREQ */
- if (priv->type == TYPE_TWIN)
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
- priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN |
- WT_RDY_ENAB);
- /* Write first byte(s) */
- spin_lock_irqsave(priv->register_lock, flags);
- for (i = 0; i < n; i++)
- write_scc_data(priv,
- priv->tx_buf[priv->tx_tail][i], 1);
- enable_dma(priv->param.dma);
- spin_unlock_irqrestore(priv->register_lock, flags);
- } else {
- write_scc(priv, R15, TxUIE);
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
- tx_isr(priv);
- }
- /* Reset EOM latch if we do not have the AUTOEOM feature */
- if (priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-}
-
-
-static inline void rx_on(struct scc_priv *priv)
-{
- unsigned long flags;
-
- /* Clear RX FIFO */
- while (read_scc(priv, R0) & Rx_CH_AV)
- read_scc_data(priv);
- priv->rx_over = 0;
- if (priv->param.dma >= 0) {
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_READ);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- enable_dma(priv->param.dma);
- /* Configure PackeTwin DMA */
- if (priv->type == TYPE_TWIN) {
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
- priv->card_base + TWIN_DMA_CFG);
- }
- /* Sp. cond. intr. only, ext int enable, RX DMA enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
- WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
- } else {
- /* Reset current frame */
- priv->rx_ptr = 0;
- /* Intr. on all Rx characters and Sp. cond., ext int enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
- WT_FN_RDYFN);
- }
- write_scc(priv, R0, ERR_RES);
- write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
-}
-
-
-static inline void rx_off(struct scc_priv *priv)
-{
- /* Disable receiver */
- write_scc(priv, R3, Rx8);
- /* Disable DREQ / RX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* Disable DMA */
- if (priv->param.dma >= 0)
- disable_dma(priv->param.dma);
-}
-
-
-static void start_timer(struct scc_priv *priv, int t, int r15)
-{
- outb(priv->tmr_mode, priv->tmr_ctrl);
- if (t == 0) {
- tm_isr(priv);
- } else if (t > 0) {
- outb(t & 0xFF, priv->tmr_cnt);
- outb((t >> 8) & 0xFF, priv->tmr_cnt);
- if (priv->type != TYPE_TWIN) {
- write_scc(priv, R15, r15 | CTSIE);
- priv->rr0 |= CTS;
- }
- }
-}
-
-
-static inline unsigned char random(void)
-{
- /* See "Numerical Recipes in C", second edition, p. 284 */
- rand = rand * 1664525L + 1013904223L;
- return (unsigned char) (rand >> 24);
-}
-
-static inline void z8530_isr(struct scc_info *info)
-{
- int is, i = 100;
-
- while ((is = read_scc(&info->priv[0], R3)) && i--) {
- if (is & CHARxIP) {
- rx_isr(&info->priv[0]);
- } else if (is & CHATxIP) {
- tx_isr(&info->priv[0]);
- } else if (is & CHAEXT) {
- es_isr(&info->priv[0]);
- } else if (is & CHBRxIP) {
- rx_isr(&info->priv[1]);
- } else if (is & CHBTxIP) {
- tx_isr(&info->priv[1]);
- } else {
- es_isr(&info->priv[1]);
- }
- write_scc(&info->priv[0], R0, RES_H_IUS);
- i++;
- }
- if (i < 0) {
- printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
- is);
- }
- /* Ok, no interrupts pending from this 8530. The INT line should
- be inactive now. */
-}
-
-
-static irqreturn_t scc_isr(int irq, void *dev_id)
-{
- struct scc_info *info = dev_id;
-
- spin_lock(info->priv[0].register_lock);
- /* At this point interrupts are enabled, and the interrupt under service
- is already acknowledged, but masked off.
-
- Interrupt processing: We loop until we know that the IRQ line is
- low. If another positive edge occurs afterwards during the ISR,
- another interrupt will be triggered by the interrupt controller
- as soon as the IRQ level is enabled again (see asm/irq.h).
-
- Bottom-half handlers will be processed after scc_isr(). This is
- important, since we only have small ringbuffers and want new data
- to be fetched/delivered immediately. */
-
- if (info->priv[0].type == TYPE_TWIN) {
- int is, card_base = info->priv[0].card_base;
- while ((is = ~inb(card_base + TWIN_INT_REG)) &
- TWIN_INT_MSK) {
- if (is & TWIN_SCC_MSK) {
- z8530_isr(info);
- } else if (is & TWIN_TMR1_MSK) {
- inb(card_base + TWIN_CLR_TMR1);
- tm_isr(&info->priv[0]);
- } else {
- inb(card_base + TWIN_CLR_TMR2);
- tm_isr(&info->priv[1]);
- }
- }
- } else
- z8530_isr(info);
- spin_unlock(info->priv[0].register_lock);
- return IRQ_HANDLED;
-}
-
-
-static void rx_isr(struct scc_priv *priv)
-{
- if (priv->param.dma >= 0) {
- /* Check special condition and perform error reset. See 2.4.7.5. */
- special_condition(priv, read_scc(priv, R1));
- write_scc(priv, R0, ERR_RES);
- } else {
- /* Check special condition for each character. Error reset not necessary.
- Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
- int rc;
- while (read_scc(priv, R0) & Rx_CH_AV) {
- rc = read_scc(priv, R1);
- if (priv->rx_ptr < BUF_SIZE)
- priv->rx_buf[priv->rx_head][priv->
- rx_ptr++] =
- read_scc_data(priv);
- else {
- priv->rx_over = 2;
- read_scc_data(priv);
- }
- special_condition(priv, rc);
- }
- }
-}
-
-
-static void special_condition(struct scc_priv *priv, int rc)
-{
- int cb;
- unsigned long flags;
-
- /* See Figure 2-15. Only overrun and EOF need to be checked. */
-
- if (rc & Rx_OVR) {
- /* Receiver overrun */
- priv->rx_over = 1;
- if (priv->param.dma < 0)
- write_scc(priv, R0, ERR_RES);
- } else if (rc & END_FR) {
- /* End of frame. Get byte count */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
- cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
- 2;
- release_dma_lock(flags);
- } else {
- cb = priv->rx_ptr - 2;
- }
- if (priv->rx_over) {
- /* We had an overrun */
- priv->dev->stats.rx_errors++;
- if (priv->rx_over == 2)
- priv->dev->stats.rx_length_errors++;
- else
- priv->dev->stats.rx_fifo_errors++;
- priv->rx_over = 0;
- } else if (rc & CRC_ERR) {
- /* Count invalid CRC only if packet length >= minimum */
- if (cb >= 15) {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_crc_errors++;
- }
- } else {
- if (cb >= 15) {
- if (priv->rx_count < NUM_RX_BUF - 1) {
- /* Put good frame in FIFO */
- priv->rx_len[priv->rx_head] = cb;
- priv->rx_head =
- (priv->rx_head +
- 1) % NUM_RX_BUF;
- priv->rx_count++;
- schedule_work(&priv->rx_work);
- } else {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_over_errors++;
- }
- }
- }
- /* Get ready for new frame */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- } else {
- priv->rx_ptr = 0;
- }
- }
-}
-
-
-static void rx_bh(struct work_struct *ugli_api)
-{
- struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
- int i = priv->rx_tail;
- int cb;
- unsigned long flags;
- struct sk_buff *skb;
- unsigned char *data;
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- while (priv->rx_count) {
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- cb = priv->rx_len[i];
- /* Allocate buffer */
- skb = dev_alloc_skb(cb + 1);
- if (skb == NULL) {
- /* Drop packet */
- priv->dev->stats.rx_dropped++;
- } else {
- /* Fill buffer */
- data = skb_put(skb, cb + 1);
- data[0] = 0;
- memcpy(&data[1], priv->rx_buf[i], cb);
- skb->protocol = ax25_type_trans(skb, priv->dev);
- netif_rx(skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += cb;
- }
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move tail */
- priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
- priv->rx_count--;
- }
- spin_unlock_irqrestore(&priv->ring_lock, flags);
-}
-
-
-static void tx_isr(struct scc_priv *priv)
-{
- int i = priv->tx_tail, p = priv->tx_ptr;
-
- /* Suspend TX interrupts if we don't want to send anything.
- See Figure 2-22. */
- if (p == priv->tx_len[i]) {
- write_scc(priv, R0, RES_Tx_P);
- return;
- }
-
- /* Write characters */
- while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
- write_scc_data(priv, priv->tx_buf[i][p++], 0);
- }
-
- /* Reset EOM latch of Z8530 */
- if (!priv->tx_ptr && p && priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-
- priv->tx_ptr = p;
-}
-
-
-static void es_isr(struct scc_priv *priv)
-{
- int i, rr0, drr0, res;
- unsigned long flags;
-
- /* Read status, reset interrupt bit (open latches) */
- rr0 = read_scc(priv, R0);
- write_scc(priv, R0, RES_EXT_INT);
- drr0 = priv->rr0 ^ rr0;
- priv->rr0 = rr0;
-
- /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
- it might have already been cleared again by AUTOEOM. */
- if (priv->state == TX_DATA) {
- /* Get remaining bytes */
- i = priv->tx_tail;
- if (priv->param.dma >= 0) {
- disable_dma(priv->param.dma);
- flags = claim_dma_lock();
- res = get_dma_residue(priv->param.dma);
- release_dma_lock(flags);
- } else {
- res = priv->tx_len[i] - priv->tx_ptr;
- priv->tx_ptr = 0;
- }
- /* Disable DREQ / TX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- if (res) {
- /* Update packet statistics */
- priv->dev->stats.tx_errors++;
- priv->dev->stats.tx_fifo_errors++;
- /* Other underrun interrupts may already be waiting */
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R0, RES_EXT_INT);
- } else {
- /* Update packet statistics */
- priv->dev->stats.tx_packets++;
- priv->dev->stats.tx_bytes += priv->tx_len[i];
- /* Remove frame from FIFO */
- priv->tx_tail = (i + 1) % NUM_TX_BUF;
- priv->tx_count--;
- /* Inform upper layers */
- netif_wake_queue(priv->dev);
- }
- /* Switch state */
- write_scc(priv, R15, 0);
- if (priv->tx_count &&
- time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) {
- priv->state = TX_PAUSE;
- start_timer(priv, priv->param.txpause, 0);
- } else {
- priv->state = TX_TAIL;
- start_timer(priv, priv->param.txtail, 0);
- }
- }
-
- /* DCD transition */
- if (drr0 & DCD) {
- if (rr0 & DCD) {
- switch (priv->state) {
- case IDLE:
- case WAIT:
- priv->state = DCD_ON;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdon, 0);
- }
- } else {
- switch (priv->state) {
- case RX_ON:
- rx_off(priv);
- priv->state = DCD_OFF;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdoff, 0);
- }
- }
- }
-
- /* CTS transition */
- if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
- tm_isr(priv);
-
-}
-
-
-static void tm_isr(struct scc_priv *priv)
-{
- switch (priv->state) {
- case TX_HEAD:
- case TX_PAUSE:
- tx_on(priv);
- priv->state = TX_DATA;
- break;
- case TX_TAIL:
- write_scc(priv, R5, TxCRC_ENAB | Tx8);
- priv->state = RTS_OFF;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.rtsoff, 0);
- break;
- case RTS_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- priv->dev->stats.collisions++;
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv, priv->param.waittime, DCDIE);
- }
- break;
- case WAIT:
- if (priv->tx_count) {
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5,
- TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- } else {
- priv->state = IDLE;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, DCDIE);
- }
- break;
- case DCD_ON:
- case DCD_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv,
- random() / priv->param.persist *
- priv->param.slottime, DCDIE);
- }
- break;
- }
-}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index cf69da0e296c..25b38a374e3c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/hyperv.h>
#include <linux/rndis.h>
+#include <linux/jhash.h>
/* RSS related */
#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
@@ -237,6 +238,7 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev);
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp);
unsigned int netvsc_xdp_fraglen(unsigned int len);
@@ -246,6 +248,8 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netvsc_device *nvdev);
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags);
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
@@ -942,12 +946,21 @@ struct nvsc_rsc {
#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
-struct netvsc_stats {
+struct netvsc_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ struct u64_stats_sync syncp;
+};
+
+struct netvsc_stats_rx {
u64 packets;
u64 bytes;
u64 broadcast;
u64 multicast;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
struct u64_stats_sync syncp;
};
@@ -1046,6 +1059,55 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+ * packets. We can use ethtool to change UDP hash level when necessary.
+ */
+static inline u32 netvsc_get_hash(struct sk_buff *skb,
+ const struct net_device_context *ndc)
+{
+ struct flow_keys flow;
+ u32 hash, pkt_proto = 0;
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ return 0;
+
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
+ return skb_get_hash(skb);
+ } else {
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+ return 0;
+
+ __skb_set_sw_hash(skb, hash, false);
+ }
+
+ return hash;
+}
+
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
@@ -1060,9 +1122,10 @@ struct netvsc_channel {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ bool xdp_flush;
- struct netvsc_stats tx_stats;
- struct netvsc_stats rx_stats;
+ struct netvsc_stats_tx tx_stats;
+ struct netvsc_stats_rx rx_stats;
};
/* Per netvsc device */
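The header changes split netvsc_stats into separate TX/RX variants (adding xdp_xmit, xdp_redirect and xdp_tx counters) and move netvsc_get_hash() here so the new XDP transmit path can reuse it. A hedged example of how a reader consumes one of these per-channel counter blocks with the u64_stats seqcount pattern (the real ethtool code is further down in netvsc_drv.c):

#include <linux/u64_stats_sync.h>

/* Read a consistent packets/bytes snapshot from one RX stats block;
 * 'rx' points at a struct netvsc_stats_rx as defined above.
 */
static void foo_read_rx_stats(const struct netvsc_stats_rx *rx,
			      u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&rx->syncp);
		*packets = rx->packets;
		*bytes = rx->bytes;
	} while (u64_stats_fetch_retry_irq(&rx->syncp, start));
}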
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9442f751ad3a..6e42cb03e226 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
+#include <linux/filter.h>
#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>
@@ -792,9 +793,9 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
int queue_sends;
u64 cmd_rqst;
- cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
+ cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
- netdev_err(ndev, "Incorrect transaction id\n");
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
@@ -805,7 +806,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
- struct netvsc_stats *tx_stats;
+ struct netvsc_stats_tx *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
@@ -854,9 +855,9 @@ static void netvsc_send_completion(struct net_device *ndev,
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
- (u64)desc->trans_id);
+ desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
- netdev_err(ndev, "Invalid transaction id\n");
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
@@ -1670,12 +1671,17 @@ int netvsc_poll(struct napi_struct *napi, int budget)
if (!nvchan->desc)
nvchan->desc = hv_pkt_iter_first(channel);
+ nvchan->xdp_flush = false;
+
while (nvchan->desc && work_done < budget) {
work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
ndev, nvchan->desc, budget);
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
+ if (nvchan->xdp_flush)
+ xdp_do_flush();
+
/* Send any pending receive completions */
ret = send_recv_completions(ndev, net_device, nvchan);
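netvsc_poll() now batches redirect flushing: the per-packet XDP handler sets nvchan->xdp_flush when xdp_do_redirect() succeeds, and the poll loop issues a single xdp_do_flush() after the budget is processed. The same pattern in a generic NAPI poll, sketched with a hypothetical foo_channel:

#include <linux/filter.h>
#include <linux/netdevice.h>

struct foo_channel {
	struct napi_struct napi;
	bool xdp_flush;		/* set by the RX handler on successful redirect */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_channel *ch = container_of(napi, struct foo_channel, napi);
	int work_done = 0;

	ch->xdp_flush = false;

	/* ... per-packet processing may call xdp_do_redirect() and set
	 * ch->xdp_flush = true on success ...
	 */

	if (ch->xdp_flush)
		xdp_do_flush();	/* one flush per poll, not per packet */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}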
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 7856905414eb..4a9522689fa4 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
@@ -23,11 +24,13 @@
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp)
{
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
void *data = nvchan->rsc.data[0];
u32 len = nvchan->rsc.len[0];
struct page *page = NULL;
struct bpf_prog *prog;
u32 act = XDP_PASS;
+ bool drop = true;
xdp->data_hard_start = NULL;
@@ -60,9 +63,34 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
switch (act) {
case XDP_PASS:
case XDP_TX:
+ drop = false;
+ break;
+
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ if (!xdp_do_redirect(ndev, xdp, prog)) {
+ nvchan->xdp_flush = true;
+ drop = false;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+
+ rx_stats->xdp_redirect++;
+ rx_stats->packets++;
+ rx_stats->bytes += nvchan->rsc.pktlen;
+
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ } else {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
@@ -74,7 +102,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
out:
rcu_read_unlock();
- if (page && act != XDP_PASS && act != XDP_TX) {
+ if (page && drop) {
__free_page(page);
xdp->data_hard_start = NULL;
}
@@ -137,7 +165,6 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
struct netdev_bpf xdp;
- bpf_op_t ndo_bpf;
int ret;
ASSERT_RTNL();
@@ -145,8 +172,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
if (!vf_netdev)
return 0;
- ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
- if (!ndo_bpf)
+ if (!vf_netdev->netdev_ops->ndo_bpf)
return 0;
memset(&xdp, 0, sizeof(xdp));
@@ -157,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = ndo_bpf(vf_netdev, &xdp);
+ ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
@@ -199,3 +225,68 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
return -EINVAL;
}
}
+
+static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
+ struct xdp_frame *frame, u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ netvsc_get_hash(skb, netdev_priv(ndev));
+
+ skb_record_rx_queue(skb, q_idx);
+
+ netvsc_xdp_xmit(skb, ndev);
+
+ return 0;
+}
+
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ const struct net_device_ops *vf_ops;
+ struct netvsc_stats_tx *tx_stats;
+ struct netvsc_device *nvsc_dev;
+ struct net_device *vf_netdev;
+ int i, count = 0;
+ u16 q_idx;
+
+ /* Don't transmit if netvsc_device is gone */
+ nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
+ if (unlikely(!nvsc_dev || nvsc_dev->destroy))
+ return 0;
+
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+ * If netpoll is in use, then the VF cannot be used either.
+ */
+ vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
+ vf_netdev->netdev_ops->ndo_xdp_xmit &&
+ ndev_ctx->data_path_is_vf) {
+ vf_ops = vf_netdev->netdev_ops;
+ return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
+ }
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
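netvsc_ndoxdp_xmit() above converts each xdp_frame to an skb, re-hashes it with the shared netvsc_get_hash(), and pushes it through the regular netvsc transmit path, or hands the whole batch to the VF's own ndo_xdp_xmit when the VF carries the data path. The hunk that registers it in netvsc's net_device_ops is not part of this excerpt; hooking such a handler up looks roughly like the following hypothetical sketch (foo_ names and the flags check are assumptions, not quoted from the patch):

#include <linux/netdevice.h>
#include <net/xdp.h>

static int foo_xdp_xmit(struct net_device *ndev, int n,
			struct xdp_frame **frames, u32 flags)
{
	int i, count = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		/* hand frames[i] to the driver's TX path; stop on failure */
		count++;
	}
	return count;	/* number of frames accepted */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_xdp_xmit	= foo_xdp_xmit,	/* netvsc registers netvsc_ndoxdp_xmit here */
};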
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fde1c492ca02..27f6bbca6619 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -242,56 +242,6 @@ static inline void *init_ppi_data(struct rndis_message *msg,
return ppi + 1;
}
-/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
- * packets. We can use ethtool to change UDP hash level when necessary.
- */
-static inline u32 netvsc_get_hash(
- struct sk_buff *skb,
- const struct net_device_context *ndc)
-{
- struct flow_keys flow;
- u32 hash, pkt_proto = 0;
- static u32 hashrnd __read_mostly;
-
- net_get_random_once(&hashrnd, sizeof(hashrnd));
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
- return 0;
-
- switch (flow.basic.ip_proto) {
- case IPPROTO_TCP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_TCP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_TCP6_L4HASH;
-
- break;
-
- case IPPROTO_UDP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_UDP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_UDP6_L4HASH;
-
- break;
- }
-
- if (pkt_proto & ndc->l4_hash) {
- return skb_get_hash(skb);
- } else {
- if (flow.basic.n_proto == htons(ETH_P_IP))
- hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
- else
- return 0;
-
- __skb_set_sw_hash(skb, hash, false);
- }
-
- return hash;
-}
-
static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sk_buff *skb, int old_idx)
{
@@ -804,7 +754,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
}
/* This function should only be called after skb_record_rx_queue() */
-static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -925,7 +875,7 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
struct xdp_buff xdp;
u32 act;
@@ -934,6 +884,9 @@ int netvsc_recv_callback(struct net_device *net,
act = netvsc_run_xdp(net, nvchan, &xdp);
+ if (act == XDP_REDIRECT)
+ return NVSP_STAT_SUCCESS;
+
if (act != XDP_PASS && act != XDP_TX) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_drop++;
@@ -958,6 +911,9 @@ int netvsc_recv_callback(struct net_device *net,
* statistics will not work correctly.
*/
u64_stats_update_begin(&rx_stats->syncp);
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -1353,28 +1309,29 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
/* fetch percpu stats of netvsc */
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_ethtool_pcpu_stats *this_tot =
&pcpu_tot[nvchan->channel->target_cpu];
u64 packets, bytes;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;
@@ -1406,27 +1363,28 @@ static void netvsc_get_stats64(struct net_device *net,
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
u64 packets, bytes, multicast;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- multicast = stats->multicast + stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ multicast = rx_stats->multicast + rx_stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
@@ -1515,8 +1473,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
+/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1543,12 +1501,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
- const struct netvsc_stats *qstats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_vf_pcpu_stats sum;
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
+ u64 xdp_xmit;
int i, j, cpu;
if (!nvdev)
@@ -1562,26 +1524,32 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
for (j = 0; j < nvdev->num_chn; j++) {
- qstats = &nvdev->chan_table[j].tx_stats;
+ tx_stats = &nvdev->chan_table[j].tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
- qstats = &nvdev->chan_table[j].rx_stats;
+ rx_stats = &nvdev->chan_table[j].rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- xdp_drop = qstats->xdp_drop;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_redirect = rx_stats->xdp_redirect;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
+ data[i++] = xdp_redirect;
+ data[i++] = xdp_tx;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
@@ -1622,9 +1590,12 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
for (i = 0; i < nvdev->num_chn; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
ethtool_sprintf(&p, "rx_queue_%u_packets", i);
ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
}
for_each_present_cpu(cpu) {
@@ -2057,6 +2028,7 @@ static const struct net_device_ops device_ops = {
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
.ndo_bpf = netvsc_bpf,
+ .ndo_xdp_xmit = netvsc_ndoxdp_xmit,
};
/*
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 0f7c6dc2ed15..95da876c5613 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -33,13 +33,6 @@ config IEEE802154_AT86RF230
This driver can also be built as a module. To do so, say M here.
The module will be called 'at86rf230'.
-config IEEE802154_AT86RF230_DEBUGFS
- depends on IEEE802154_AT86RF230
- bool "AT86RF230 debugfs interface"
- depends on DEBUG_FS
- help
- This option compiles debugfs code for the at86rf230 driver.
-
config IEEE802154_MRF24J40
tristate "Microchip MRF24J40 transceiver driver"
depends on IEEE802154_DRIVERS && MAC802154
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 549d04b5f3d4..15f283b26721 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -23,7 +23,6 @@
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
-#include <linux/debugfs.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>
@@ -72,19 +71,11 @@ struct at86rf230_state_change {
void (*complete)(void *context);
u8 from_state;
u8 to_state;
+ int trac;
bool free;
};
-struct at86rf230_trac {
- u64 success;
- u64 success_data_pending;
- u64 success_wait_for_ack;
- u64 channel_access_failure;
- u64 no_ack;
- u64 invalid;
-};
-
struct at86rf230_local {
struct spi_device *spi;
@@ -104,8 +95,6 @@ struct at86rf230_local {
u8 tx_retry;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
-
- struct at86rf230_trac trac;
};
#define AT86RF2XX_NUMREGS 0x3F
@@ -346,8 +335,7 @@ at86rf230_async_error_recover_complete(void *context)
if (lp->was_tx) {
lp->was_tx = 0;
- dev_kfree_skb_any(lp->tx_skb);
- ieee802154_wake_queue(lp->hw);
+ ieee802154_xmit_hw_error(lp->hw, lp->tx_skb);
}
}
@@ -653,7 +641,11 @@ at86rf230_tx_complete(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
+ if (ctx->trac == IEEE802154_SUCCESS)
+ ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
+ else
+ ieee802154_xmit_error(lp->hw, lp->tx_skb, ctx->trac);
+
kfree(ctx);
}
@@ -672,30 +664,21 @@ at86rf230_tx_trac_check(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
+ u8 trac = TRAC_MASK(ctx->buf[1]);
- if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
- u8 trac = TRAC_MASK(ctx->buf[1]);
-
- switch (trac) {
- case TRAC_SUCCESS:
- lp->trac.success++;
- break;
- case TRAC_SUCCESS_DATA_PENDING:
- lp->trac.success_data_pending++;
- break;
- case TRAC_CHANNEL_ACCESS_FAILURE:
- lp->trac.channel_access_failure++;
- break;
- case TRAC_NO_ACK:
- lp->trac.no_ack++;
- break;
- case TRAC_INVALID:
- lp->trac.invalid++;
- break;
- default:
- WARN_ONCE(1, "received tx trac status %d\n", trac);
- break;
- }
+ switch (trac) {
+ case TRAC_SUCCESS:
+ case TRAC_SUCCESS_DATA_PENDING:
+ ctx->trac = IEEE802154_SUCCESS;
+ break;
+ case TRAC_CHANNEL_ACCESS_FAILURE:
+ ctx->trac = IEEE802154_CHANNEL_ACCESS_FAILURE;
+ break;
+ case TRAC_NO_ACK:
+ ctx->trac = IEEE802154_NO_ACK;
+ break;
+ default:
+ ctx->trac = IEEE802154_SYSTEM_ERROR;
}
at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on);
@@ -737,25 +720,6 @@ at86rf230_rx_trac_check(void *context)
u8 *buf = ctx->buf;
int rc;
- if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
- u8 trac = TRAC_MASK(buf[1]);
-
- switch (trac) {
- case TRAC_SUCCESS:
- lp->trac.success++;
- break;
- case TRAC_SUCCESS_WAIT_FOR_ACK:
- lp->trac.success_wait_for_ack++;
- break;
- case TRAC_INVALID:
- lp->trac.invalid++;
- break;
- default:
- WARN_ONCE(1, "received rx trac status %d\n", trac);
- break;
- }
- }
-
buf[0] = CMD_FB;
ctx->trx.len = AT86RF2XX_MAX_BUF;
ctx->msg.complete = at86rf230_rx_read_frame_complete;
@@ -951,10 +915,6 @@ at86rf230_start(struct ieee802154_hw *hw)
{
struct at86rf230_local *lp = hw->priv;
- /* reset trac stats on start */
- if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS))
- memset(&lp->trac, 0, sizeof(struct at86rf230_trac));
-
at86rf230_awake(lp);
enable_irq(lp->spi->irq);
@@ -1064,36 +1024,6 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
if (rc < 0)
return rc;
- /* This sets the symbol_duration according frequency on the 212.
- * TODO move this handling while set channel and page in cfg802154.
- * We can do that, this timings are according 802.15.4 standard.
- * If we do that in cfg802154, this is a more generic calculation.
- *
- * This should also protected from ifs_timer. Means cancel timer and
- * init with a new value. For now, this is okay.
- */
- if (channel == 0) {
- if (page == 0) {
- /* SUB:0 and BPSK:0 -> BPSK-20 */
- lp->hw->phy->symbol_duration = 50;
- } else {
- /* SUB:1 and BPSK:0 -> BPSK-40 */
- lp->hw->phy->symbol_duration = 25;
- }
- } else {
- if (page == 0)
- /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */
- lp->hw->phy->symbol_duration = 40;
- else
- /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */
- lp->hw->phy->symbol_duration = 16;
- }
-
- lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
- lp->hw->phy->symbol_duration;
- lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
- lp->hw->phy->symbol_duration;
-
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
@@ -1569,7 +1499,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->data = &at86rf231_data;
lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 11;
- lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf231_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels;
@@ -1582,7 +1511,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->supported.channels[0] = 0x00007FF;
lp->hw->phy->supported.channels[2] = 0x00007FF;
lp->hw->phy->current_channel = 5;
- lp->hw->phy->symbol_duration = 25;
lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
lp->hw->phy->supported.tx_powers = at86rf212_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
@@ -1594,7 +1522,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->data = &at86rf233_data;
lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 13;
- lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf233_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels;
@@ -1615,47 +1542,6 @@ not_supp:
return rc;
}
-#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS
-static struct dentry *at86rf230_debugfs_root;
-
-static int at86rf230_stats_show(struct seq_file *file, void *offset)
-{
- struct at86rf230_local *lp = file->private;
-
- seq_printf(file, "SUCCESS:\t\t%8llu\n", lp->trac.success);
- seq_printf(file, "SUCCESS_DATA_PENDING:\t%8llu\n",
- lp->trac.success_data_pending);
- seq_printf(file, "SUCCESS_WAIT_FOR_ACK:\t%8llu\n",
- lp->trac.success_wait_for_ack);
- seq_printf(file, "CHANNEL_ACCESS_FAILURE:\t%8llu\n",
- lp->trac.channel_access_failure);
- seq_printf(file, "NO_ACK:\t\t\t%8llu\n", lp->trac.no_ack);
- seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(at86rf230_stats);
-
-static void at86rf230_debugfs_init(struct at86rf230_local *lp)
-{
- char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-";
-
- strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
-
- at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
-
- debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp,
- &at86rf230_stats_fops);
-}
-
-static void at86rf230_debugfs_remove(void)
-{
- debugfs_remove_recursive(at86rf230_debugfs_root);
-}
-#else
-static void at86rf230_debugfs_init(struct at86rf230_local *lp) { }
-static void at86rf230_debugfs_remove(void) { }
-#endif
-
static int at86rf230_probe(struct spi_device *spi)
{
struct ieee802154_hw *hw;
@@ -1752,16 +1638,12 @@ static int at86rf230_probe(struct spi_device *spi)
/* going into sleep by default */
at86rf230_sleep(lp);
- at86rf230_debugfs_init(lp);
-
rc = ieee802154_register_hw(lp->hw);
if (rc)
- goto free_debugfs;
+ goto free_dev;
return rc;
-free_debugfs:
- at86rf230_debugfs_remove();
free_dev:
ieee802154_free_hw(lp->hw);
@@ -1776,7 +1658,6 @@ static void at86rf230_remove(struct spi_device *spi)
at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
ieee802154_unregister_hw(lp->hw);
ieee802154_free_hw(lp->hw);
- at86rf230_debugfs_remove();
dev_dbg(&spi->dev, "unregistered at86rf230\n");
}
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 07bafbf94680..2c338783893d 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -206,9 +206,7 @@ static void atusb_tx_done(struct atusb *atusb, u8 seq)
* unlikely case now that seq == expect is then true, but can
* happen and fail with a tx_skb = NULL;
*/
- ieee802154_wake_queue(atusb->hw);
- if (atusb->tx_skb)
- dev_kfree_skb_irq(atusb->tx_skb);
+ ieee802154_xmit_hw_error(atusb->hw, atusb->tx_skb);
}
}
@@ -614,36 +612,6 @@ static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
if (rc < 0)
return rc;
- /* This sets the symbol_duration according frequency on the 212.
- * TODO move this handling while set channel and page in cfg802154.
- * We can do that, this timings are according 802.15.4 standard.
- * If we do that in cfg802154, this is a more generic calculation.
- *
- * This should also protected from ifs_timer. Means cancel timer and
- * init with a new value. For now, this is okay.
- */
- if (channel == 0) {
- if (page == 0) {
- /* SUB:0 and BPSK:0 -> BPSK-20 */
- lp->hw->phy->symbol_duration = 50;
- } else {
- /* SUB:1 and BPSK:0 -> BPSK-40 */
- lp->hw->phy->symbol_duration = 25;
- }
- } else {
- if (page == 0)
- /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */
- lp->hw->phy->symbol_duration = 40;
- else
- /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */
- lp->hw->phy->symbol_duration = 16;
- }
-
- lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
- lp->hw->phy->symbol_duration;
- lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
- lp->hw->phy->symbol_duration;
-
return atusb_write_subreg(lp, SR_CHANNEL, channel);
}
@@ -869,7 +837,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
chip = "AT86RF230";
atusb->hw->phy->supported.channels[0] = 0x7FFF800;
atusb->hw->phy->current_channel = 11; /* reset default */
- atusb->hw->phy->symbol_duration = 16;
atusb->hw->phy->supported.tx_powers = atusb_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->supported.cca_ed_levels = atusb_ed_levels;
@@ -879,7 +846,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
chip = "AT86RF231";
atusb->hw->phy->supported.channels[0] = 0x7FFF800;
atusb->hw->phy->current_channel = 11; /* reset default */
- atusb->hw->phy->symbol_duration = 16;
atusb->hw->phy->supported.tx_powers = atusb_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->supported.cca_ed_levels = atusb_ed_levels;
@@ -891,7 +857,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
atusb->hw->phy->supported.channels[0] = 0x00007FF;
atusb->hw->phy->supported.channels[2] = 0x00007FF;
atusb->hw->phy->current_channel = 5;
- atusb->hw->phy->symbol_duration = 25;
atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
atusb->hw->phy->supported.tx_powers = at86rf212_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 187cbc634ce8..42c0b451088d 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -89,48 +89,6 @@
#define CA8210_TEST_INT_FILE_NAME "ca8210_test"
#define CA8210_TEST_INT_FIFO_SIZE 256
-/* MAC status enumerations */
-#define MAC_SUCCESS (0x00)
-#define MAC_ERROR (0x01)
-#define MAC_CANCELLED (0x02)
-#define MAC_READY_FOR_POLL (0x03)
-#define MAC_COUNTER_ERROR (0xDB)
-#define MAC_IMPROPER_KEY_TYPE (0xDC)
-#define MAC_IMPROPER_SECURITY_LEVEL (0xDD)
-#define MAC_UNSUPPORTED_LEGACY (0xDE)
-#define MAC_UNSUPPORTED_SECURITY (0xDF)
-#define MAC_BEACON_LOST (0xE0)
-#define MAC_CHANNEL_ACCESS_FAILURE (0xE1)
-#define MAC_DENIED (0xE2)
-#define MAC_DISABLE_TRX_FAILURE (0xE3)
-#define MAC_SECURITY_ERROR (0xE4)
-#define MAC_FRAME_TOO_LONG (0xE5)
-#define MAC_INVALID_GTS (0xE6)
-#define MAC_INVALID_HANDLE (0xE7)
-#define MAC_INVALID_PARAMETER (0xE8)
-#define MAC_NO_ACK (0xE9)
-#define MAC_NO_BEACON (0xEA)
-#define MAC_NO_DATA (0xEB)
-#define MAC_NO_SHORT_ADDRESS (0xEC)
-#define MAC_OUT_OF_CAP (0xED)
-#define MAC_PAN_ID_CONFLICT (0xEE)
-#define MAC_REALIGNMENT (0xEF)
-#define MAC_TRANSACTION_EXPIRED (0xF0)
-#define MAC_TRANSACTION_OVERFLOW (0xF1)
-#define MAC_TX_ACTIVE (0xF2)
-#define MAC_UNAVAILABLE_KEY (0xF3)
-#define MAC_UNSUPPORTED_ATTRIBUTE (0xF4)
-#define MAC_INVALID_ADDRESS (0xF5)
-#define MAC_ON_TIME_TOO_LONG (0xF6)
-#define MAC_PAST_TIME (0xF7)
-#define MAC_TRACKING_OFF (0xF8)
-#define MAC_INVALID_INDEX (0xF9)
-#define MAC_LIMIT_REACHED (0xFA)
-#define MAC_READ_ONLY (0xFB)
-#define MAC_SCAN_IN_PROGRESS (0xFC)
-#define MAC_SUPERFRAME_OVERLAP (0xFD)
-#define MAC_SYSTEM_ERROR (0xFF)
-
/* HWME attribute IDs */
#define HWME_EDTHRESHOLD (0x04)
#define HWME_EDVALUE (0x06)
@@ -551,58 +509,58 @@ static int link_to_linux_err(int link_status)
return link_status;
}
switch (link_status) {
- case MAC_SUCCESS:
- case MAC_REALIGNMENT:
+ case IEEE802154_SUCCESS:
+ case IEEE802154_REALIGNMENT:
return 0;
- case MAC_IMPROPER_KEY_TYPE:
+ case IEEE802154_IMPROPER_KEY_TYPE:
return -EKEYREJECTED;
- case MAC_IMPROPER_SECURITY_LEVEL:
- case MAC_UNSUPPORTED_LEGACY:
- case MAC_DENIED:
+ case IEEE802154_IMPROPER_SECURITY_LEVEL:
+ case IEEE802154_UNSUPPORTED_LEGACY:
+ case IEEE802154_DENIED:
return -EACCES;
- case MAC_BEACON_LOST:
- case MAC_NO_ACK:
- case MAC_NO_BEACON:
+ case IEEE802154_BEACON_LOST:
+ case IEEE802154_NO_ACK:
+ case IEEE802154_NO_BEACON:
return -ENETUNREACH;
- case MAC_CHANNEL_ACCESS_FAILURE:
- case MAC_TX_ACTIVE:
- case MAC_SCAN_IN_PROGRESS:
+ case IEEE802154_CHANNEL_ACCESS_FAILURE:
+ case IEEE802154_TX_ACTIVE:
+ case IEEE802154_SCAN_IN_PROGRESS:
return -EBUSY;
- case MAC_DISABLE_TRX_FAILURE:
- case MAC_OUT_OF_CAP:
+ case IEEE802154_DISABLE_TRX_FAILURE:
+ case IEEE802154_OUT_OF_CAP:
return -EAGAIN;
- case MAC_FRAME_TOO_LONG:
+ case IEEE802154_FRAME_TOO_LONG:
return -EMSGSIZE;
- case MAC_INVALID_GTS:
- case MAC_PAST_TIME:
+ case IEEE802154_INVALID_GTS:
+ case IEEE802154_PAST_TIME:
return -EBADSLT;
- case MAC_INVALID_HANDLE:
+ case IEEE802154_INVALID_HANDLE:
return -EBADMSG;
- case MAC_INVALID_PARAMETER:
- case MAC_UNSUPPORTED_ATTRIBUTE:
- case MAC_ON_TIME_TOO_LONG:
- case MAC_INVALID_INDEX:
+ case IEEE802154_INVALID_PARAMETER:
+ case IEEE802154_UNSUPPORTED_ATTRIBUTE:
+ case IEEE802154_ON_TIME_TOO_LONG:
+ case IEEE802154_INVALID_INDEX:
return -EINVAL;
- case MAC_NO_DATA:
+ case IEEE802154_NO_DATA:
return -ENODATA;
- case MAC_NO_SHORT_ADDRESS:
+ case IEEE802154_NO_SHORT_ADDRESS:
return -EFAULT;
- case MAC_PAN_ID_CONFLICT:
+ case IEEE802154_PAN_ID_CONFLICT:
return -EADDRINUSE;
- case MAC_TRANSACTION_EXPIRED:
+ case IEEE802154_TRANSACTION_EXPIRED:
return -ETIME;
- case MAC_TRANSACTION_OVERFLOW:
+ case IEEE802154_TRANSACTION_OVERFLOW:
return -ENOBUFS;
- case MAC_UNAVAILABLE_KEY:
+ case IEEE802154_UNAVAILABLE_KEY:
return -ENOKEY;
- case MAC_INVALID_ADDRESS:
+ case IEEE802154_INVALID_ADDRESS:
return -ENXIO;
- case MAC_TRACKING_OFF:
- case MAC_SUPERFRAME_OVERLAP:
+ case IEEE802154_TRACKING_OFF:
+ case IEEE802154_SUPERFRAME_OVERLAP:
return -EREMOTEIO;
- case MAC_LIMIT_REACHED:
+ case IEEE802154_LIMIT_REACHED:
return -EDQUOT;
- case MAC_READ_ONLY:
+ case IEEE802154_READ_ONLY:
return -EROFS;
default:
return -EPROTO;
@@ -754,7 +712,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
ca8210_net_rx(priv->hw, buf, len);
if (buf[0] == SPI_MCPS_DATA_CONFIRM) {
- if (buf[3] == MAC_TRANSACTION_OVERFLOW) {
+ if (buf[3] == IEEE802154_TRANSACTION_OVERFLOW) {
dev_info(
&priv->spi->dev,
"Waiting for transaction overflow to stabilise...\n");
@@ -1128,7 +1086,7 @@ static u8 tdme_setsfr_request_sync(
);
if (ret) {
dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret);
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_TDME_SETSFR_CONFIRM) {
@@ -1137,7 +1095,7 @@ static u8 tdme_setsfr_request_sync(
"sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n",
response.command_id
);
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
return response.pdata.tdme_set_sfr_cnf.status;
@@ -1151,7 +1109,7 @@ static u8 tdme_setsfr_request_sync(
*/
static u8 tdme_chipinit(void *device_ref)
{
- u8 status = MAC_SUCCESS;
+ u8 status = IEEE802154_SUCCESS;
u8 sfr_address;
struct spi_device *spi = device_ref;
struct preamble_cfg_sfr pre_cfg_value = {
@@ -1220,7 +1178,7 @@ static u8 tdme_chipinit(void *device_ref)
goto finish;
finish:
- if (status != MAC_SUCCESS) {
+ if (status != IEEE802154_SUCCESS) {
dev_err(
&spi->dev,
"failed to set sfr at %#03x, status = %#03x\n",
@@ -1287,7 +1245,7 @@ static u8 tdme_checkpibattribute(
const void *pib_attribute_value
)
{
- u8 status = MAC_SUCCESS;
+ u8 status = IEEE802154_SUCCESS;
u8 value;
value = *((u8 *)pib_attribute_value);
@@ -1296,52 +1254,52 @@ static u8 tdme_checkpibattribute(
/* PHY */
case PHY_TRANSMIT_POWER:
if (value > 0x3F)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case PHY_CCA_MODE:
if (value > 0x03)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
/* MAC */
case MAC_BATT_LIFE_EXT_PERIODS:
if (value < 6 || value > 41)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_BEACON_PAYLOAD:
if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_BEACON_PAYLOAD_LENGTH:
if (value > MAX_BEACON_PAYLOAD_LENGTH)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_BEACON_ORDER:
if (value > 15)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MAX_BE:
if (value < 3 || value > 8)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MAX_CSMA_BACKOFFS:
if (value > 5)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MAX_FRAME_RETRIES:
if (value > 7)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MIN_BE:
if (value > 8)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_RESPONSE_WAIT_TIME:
if (value < 2 || value > 64)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_SUPERFRAME_ORDER:
if (value > 15)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
/* boolean */
case MAC_ASSOCIATED_PAN_COORD:
@@ -1353,16 +1311,16 @@ static u8 tdme_checkpibattribute(
case MAC_RX_ON_WHEN_IDLE:
case MAC_SECURITY_ENABLED:
if (value > 1)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
/* MAC SEC */
case MAC_AUTO_REQUEST_SECURITY_LEVEL:
if (value > 7)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_AUTO_REQUEST_KEY_ID_MODE:
if (value > 3)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
default:
break;
@@ -1522,9 +1480,9 @@ static u8 mcps_data_request(
if (ca8210_spi_transfer(device_ref, &command.command_id,
command.length + 2))
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
- return MAC_SUCCESS;
+ return IEEE802154_SUCCESS;
}
/**
@@ -1553,11 +1511,11 @@ static u8 mlme_reset_request_sync(
&response.command_id,
device_ref)) {
dev_err(&spi->dev, "cascoda_api_downstream failed\n");
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_MLME_RESET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
status = response.pdata.status;
@@ -1600,7 +1558,7 @@ static u8 mlme_set_request_sync(
*/
if (tdme_checkpibattribute(
pib_attribute, pib_attribute_length, pib_attribute_value)) {
- return MAC_INVALID_PARAMETER;
+ return IEEE802154_INVALID_PARAMETER;
}
if (pib_attribute == PHY_CURRENT_CHANNEL) {
@@ -1636,11 +1594,11 @@ static u8 mlme_set_request_sync(
command.length + 2,
&response.command_id,
device_ref)) {
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_MLME_SET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
return response.pdata.status;
}
@@ -1678,11 +1636,11 @@ static u8 hwme_set_request_sync(
command.length + 2,
&response.command_id,
device_ref)) {
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_HWME_SET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
return response.pdata.hwme_set_cnf.status;
}
@@ -1714,13 +1672,13 @@ static u8 hwme_get_request_sync(
command.length + 2,
&response.command_id,
device_ref)) {
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_HWME_GET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
- if (response.pdata.hwme_get_cnf.status == MAC_SUCCESS) {
+ if (response.pdata.hwme_get_cnf.status == IEEE802154_SUCCESS) {
*hw_attribute_length =
response.pdata.hwme_get_cnf.hw_attribute_length;
memcpy(
@@ -1770,9 +1728,8 @@ static int ca8210_async_xmit_complete(
"Link transmission unsuccessful, status = %d\n",
status
);
- if (status != MAC_TRANSACTION_OVERFLOW) {
- dev_kfree_skb_any(priv->tx_skb);
- ieee802154_wake_queue(priv->hw);
+ if (status != IEEE802154_TRANSACTION_OVERFLOW) {
+ ieee802154_xmit_error(priv->hw, priv->tx_skb, status);
return 0;
}
}
@@ -2436,7 +2393,7 @@ static int ca8210_test_check_upstream(u8 *buf, void *device_ref)
if (ret) {
response[0] = SPI_MLME_SET_CONFIRM;
response[1] = 3;
- response[2] = MAC_INVALID_PARAMETER;
+ response[2] = IEEE802154_INVALID_PARAMETER;
response[3] = buf[2];
response[4] = buf[3];
if (cascoda_api_upstream)
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index c927a5ae0d05..2fe0e4a0a0c4 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -975,10 +975,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
dev_dbg(printdev(lp), "%s\n", __func__);
- phy->symbol_duration = 16;
- phy->lifs_period = 40 * phy->symbol_duration;
- phy->sifs_period = 12 * phy->symbol_duration;
-
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
IEEE802154_HW_AFILT |
IEEE802154_HW_PROMISCUOUS;
@@ -1006,7 +1002,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
phy->current_page = 0;
/* MCR20A default reset value */
phy->current_channel = 20;
- phy->symbol_duration = 16;
phy->supported.tx_powers = mcr20a_powers;
phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
phy->cca_ed_level = phy->supported.cca_ed_levels[75];
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 888e94278a84..e133eb2bebcf 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -130,9 +130,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
*/
if (data->endpoint.config.aggregation) {
limit += SZ_1K * aggr_byte_limit_max(ipa->version);
- if (buffer_size > limit) {
+ if (buffer_size - NET_SKB_PAD > limit) {
dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
- data->endpoint_id, buffer_size, limit);
+ data->endpoint_id,
+ buffer_size - NET_SKB_PAD, limit);
return false;
}
@@ -739,6 +740,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx_data *rx_data;
+ u32 buffer_size;
bool close_eof;
u32 limit;
@@ -746,7 +748,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- limit = ipa_aggr_size_kb(rx_data->buffer_size);
+ buffer_size = rx_data->buffer_size;
+ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
val |= aggr_byte_limit_encoded(version, limit);
limit = IPA_AGGR_TIME_LIMIT;
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index e2273588c75b..944d005d2bd1 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -3,6 +3,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/reset.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
@@ -21,6 +22,10 @@
#define ASPEED_MDIO_CTRL_OP GENMASK(27, 26)
#define MDIO_C22_OP_WRITE 0b01
#define MDIO_C22_OP_READ 0b10
+#define MDIO_C45_OP_ADDR 0b00
+#define MDIO_C45_OP_WRITE 0b01
+#define MDIO_C45_OP_PREAD 0b10
+#define MDIO_C45_OP_READ 0b11
#define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21)
#define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16)
#define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0)
@@ -37,36 +42,38 @@
struct aspeed_mdio {
void __iomem *base;
+ struct reset_control *reset;
};
-static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
+ u16 data)
{
struct aspeed_mdio *ctx = bus->priv;
u32 ctrl;
- u32 data;
- int rc;
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
- regnum);
-
- /* Just clause 22 for the moment */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
+ dev_dbg(&bus->dev, "%s: st: %u op: %u, phyad: %u, regad: %u, data: %u\n",
+ __func__, st, op, phyad, regad, data);
ctrl = ASPEED_MDIO_CTRL_FIRE
- | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
- | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ)
- | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
- | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum);
+ | FIELD_PREP(ASPEED_MDIO_CTRL_ST, st)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_OP, op)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, phyad)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regad)
+ | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data);
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
- rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
!(ctrl & ASPEED_MDIO_CTRL_FIRE),
ASPEED_MDIO_INTERVAL_US,
ASPEED_MDIO_TIMEOUT_US);
- if (rc < 0)
- return rc;
+}
+
+static int aspeed_mdio_get_data(struct mii_bus *bus)
+{
+ struct aspeed_mdio *ctx = bus->priv;
+ u32 data;
+ int rc;
rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
data & ASPEED_MDIO_DATA_IDLE,
@@ -78,31 +85,80 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data);
}
-static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+static int aspeed_mdio_read_c22(struct mii_bus *bus, int addr, int regnum)
{
- struct aspeed_mdio *ctx = bus->priv;
- u32 ctrl;
+ int rc;
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
- __func__, addr, regnum, val);
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ,
+ addr, regnum, 0);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_get_data(bus);
+}
+
+static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE,
+ addr, regnum, val);
+}
+
+static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum)
+{
+ u8 c45_dev = (regnum >> 16) & 0x1F;
+ u16 c45_addr = regnum & 0xFFFF;
+ int rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
+ addr, c45_dev, c45_addr);
+ if (rc < 0)
+ return rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ,
+ addr, c45_dev, 0);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_get_data(bus);
+}
+
+static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ u8 c45_dev = (regnum >> 16) & 0x1F;
+ u16 c45_addr = regnum & 0xFFFF;
+ int rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
+ addr, c45_dev, c45_addr);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE,
+ addr, c45_dev, val);
+}
+
+static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
+ regnum);
- /* Just clause 22 for the moment */
if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
+ return aspeed_mdio_read_c45(bus, addr, regnum);
- ctrl = ASPEED_MDIO_CTRL_FIRE
- | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
- | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE)
- | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
- | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum)
- | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val);
+ return aspeed_mdio_read_c22(bus, addr, regnum);
+}
- iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
+ __func__, addr, regnum, val);
- return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
- !(ctrl & ASPEED_MDIO_CTRL_FIRE),
- ASPEED_MDIO_INTERVAL_US,
- ASPEED_MDIO_TIMEOUT_US);
+ if (regnum & MII_ADDR_C45)
+ return aspeed_mdio_write_c45(bus, addr, regnum, val);
+
+ return aspeed_mdio_write_c22(bus, addr, regnum, val);
}
static int aspeed_mdio_probe(struct platform_device *pdev)
@@ -120,15 +176,23 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
+ ctx->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (IS_ERR(ctx->reset))
+ return PTR_ERR(ctx->reset);
+
+ reset_control_deassert(ctx->reset);
+
bus->name = DRV_NAME;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
bus->parent = &pdev->dev;
bus->read = aspeed_mdio_read;
bus->write = aspeed_mdio_write;
+ bus->probe_capabilities = MDIOBUS_C22_C45;
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ reset_control_assert(ctx->reset);
return rc;
}
@@ -139,7 +203,11 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
static int aspeed_mdio_remove(struct platform_device *pdev)
{
- mdiobus_unregister(platform_get_drvdata(pdev));
+ struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev);
+ struct aspeed_mdio *ctx = bus->priv;
+
+ reset_control_assert(ctx->reset);
+ mdiobus_unregister(bus);
return 0;
}
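
For reference, the Clause 45 helpers added above unpack the combined regnum handed down by the MDIO core; the encoding itself comes from the existing MII_ADDR_C45 convention rather than from this patch.

/* Sketch of the regnum layout consumed by aspeed_mdio_read_c45() and
 * aspeed_mdio_write_c45():
 *
 *   regnum = MII_ADDR_C45 | (devad << 16) | reg;
 *
 * e.g. devad = 1 (PMA/PMD) and reg = 0x0001 (STAT1) give:
 *   c45_dev  = (regnum >> 16) & 0x1F;   // 1
 *   c45_addr = regnum & 0xFFFF;         // 0x0001
 *
 * The access is then issued as two bus operations: an ADDR cycle that
 * latches c45_addr, followed by a READ or WRITE cycle against c45_dev.
 */
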
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 582969751b4c..08541007b18a 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -30,6 +31,8 @@
#define MSCC_MIIM_CMD_VLD BIT(31)
#define MSCC_MIIM_REG_DATA 0xC
#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17))
+#define MSCC_MIIM_REG_CFG 0x10
+#define MSCC_MIIM_CFG_PRESCALE_MASK GENMASK(7, 0)
#define MSCC_PHY_REG_PHY_CFG 0x0
#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3))
@@ -50,6 +53,8 @@ struct mscc_miim_dev {
int mii_status_offset;
struct regmap *phy_regs;
const struct mscc_miim_info *info;
+ struct clk *clk;
+ u32 bus_freq;
};
/* When high resolution timers aren't built-in: we can't use usleep_range() as
@@ -241,9 +246,33 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
}
EXPORT_SYMBOL(mscc_miim_setup);
+static int mscc_miim_clk_set(struct mii_bus *bus)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+ unsigned long rate;
+ u32 div;
+
+ /* Keep the current settings */
+ if (!miim->bus_freq)
+ return 0;
+
+ rate = clk_get_rate(miim->clk);
+
+ div = DIV_ROUND_UP(rate, 2 * miim->bus_freq) - 1;
+ if (div == 0 || div & ~MSCC_MIIM_CFG_PRESCALE_MASK) {
+ dev_err(&bus->dev, "Incorrect MDIO clock frequency\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(miim->regs, MSCC_MIIM_REG_CFG,
+ MSCC_MIIM_CFG_PRESCALE_MASK, div);
+}
+
static int mscc_miim_probe(struct platform_device *pdev)
{
struct regmap *mii_regmap, *phy_regmap = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
void __iomem *regs, *phy_regs;
struct mscc_miim_dev *miim;
struct resource *res;
@@ -252,63 +281,87 @@ static int mscc_miim_probe(struct platform_device *pdev)
regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(regs)) {
- dev_err(&pdev->dev, "Unable to map MIIM registers\n");
+ dev_err(dev, "Unable to map MIIM registers\n");
return PTR_ERR(regs);
}
- mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs,
- &mscc_miim_regmap_config);
+ mii_regmap = devm_regmap_init_mmio(dev, regs, &mscc_miim_regmap_config);
if (IS_ERR(mii_regmap)) {
- dev_err(&pdev->dev, "Unable to create MIIM regmap\n");
+ dev_err(dev, "Unable to create MIIM regmap\n");
return PTR_ERR(mii_regmap);
}
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
- phy_regs = devm_ioremap_resource(&pdev->dev, res);
+ phy_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(phy_regs)) {
- dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+ dev_err(dev, "Unable to map internal phy registers\n");
return PTR_ERR(phy_regs);
}
- phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
+ phy_regmap = devm_regmap_init_mmio(dev, phy_regs,
&mscc_miim_phy_regmap_config);
if (IS_ERR(phy_regmap)) {
- dev_err(&pdev->dev, "Unable to create phy register regmap\n");
+ dev_err(dev, "Unable to create phy register regmap\n");
return PTR_ERR(phy_regmap);
}
}
- ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0);
+ ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "Unable to setup the MDIO bus\n");
+ dev_err(dev, "Unable to setup the MDIO bus\n");
return ret;
}
miim = bus->priv;
miim->phy_regs = phy_regmap;
- miim->info = device_get_match_data(&pdev->dev);
+ miim->info = device_get_match_data(dev);
if (!miim->info)
return -EINVAL;
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ miim->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(miim->clk))
+ return PTR_ERR(miim->clk);
+
+ of_property_read_u32(np, "clock-frequency", &miim->bus_freq);
+
+ if (miim->bus_freq && !miim->clk) {
+ dev_err(dev, "cannot use clock-frequency without a clock\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(miim->clk);
+ if (ret)
return ret;
+
+ ret = mscc_miim_clk_set(bus);
+ if (ret)
+ goto out_disable_clk;
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0) {
+ dev_err(dev, "Cannot register MDIO bus (%d)\n", ret);
+ goto out_disable_clk;
}
platform_set_drvdata(pdev, bus);
return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(miim->clk);
+ return ret;
}
static int mscc_miim_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct mscc_miim_dev *miim = bus->priv;
+ clk_disable_unprepare(miim->clk);
mdiobus_unregister(bus);
return 0;
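
A worked example of the prescaler computation in mscc_miim_clk_set(), using purely illustrative numbers (neither clock rate below is taken from this patch):

/* Hypothetical example: a 250 MHz system clock and a device-tree
 * "clock-frequency" of 12.5 MHz give
 *
 *   div = DIV_ROUND_UP(250000000, 2 * 12500000) - 1 = 10 - 1 = 9,
 *
 * which fits the 8-bit MSCC_MIIM_CFG_PRESCALE_MASK field and, inverting the
 * computation, yields an MDC rate of rate / (2 * (div + 1)) = 12.5 MHz.
 * A div of 0, or one that overflows the prescaler field, is rejected with
 * -EINVAL before the hardware is touched.
 */
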
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 378ee779061c..c8f398f5bc5b 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -22,6 +22,7 @@
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <net/fib_notifier.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
@@ -78,7 +79,7 @@ struct nsim_fib_rt {
struct nsim_fib4_rt {
struct nsim_fib_rt common;
struct fib_info *fi;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
@@ -283,7 +284,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data,
fib4_rt->fi = fen_info->fi;
fib_info_hold(fib4_rt->fi);
- fib4_rt->tos = fen_info->tos;
+ fib4_rt->dscp = fen_info->dscp;
fib4_rt->type = fen_info->type;
return fib4_rt;
@@ -322,7 +323,7 @@ nsim_fib4_rt_offload_failed_flag_set(struct net *net,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -342,7 +343,7 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net,
fri.tb_id = fib4_rt->common.key.tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_rt->tos;
+ fri.dscp = fib4_rt->dscp;
fri.type = fib4_rt->type;
fri.offload = false;
fri.trap = trap;
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 61418d4dc0cd..4cfd05c15aee 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -175,20 +175,18 @@ static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat,
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg)
{
- u32 reg_addr = mdiobus_c45_addr(dev, reg);
struct mii_bus *bus = xpcs->mdiodev->bus;
int addr = xpcs->mdiodev->addr;
- return mdiobus_read(bus, addr, reg_addr);
+ return mdiobus_c45_read(bus, addr, dev, reg);
}
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
{
- u32 reg_addr = mdiobus_c45_addr(dev, reg);
struct mii_bus *bus = xpcs->mdiodev->bus;
int addr = xpcs->mdiodev->addr;
- return mdiobus_write(bus, addr, reg_addr, val);
+ return mdiobus_c45_write(bus, addr, dev, reg, val);
}
static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index ea7571a2b39b..bbbf6c07ea53 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -83,6 +83,13 @@ config ADIN_PHY
- ADIN1300 - Robust, Industrial, Low Latency 10/100/1000 Gigabit
Ethernet PHY
+config ADIN1100_PHY
+ tristate "Analog Devices Industrial Ethernet T1L PHYs"
+ help
+ Adds support for the Analog Devices Industrial T1L Ethernet PHYs.
+ Currently supports the:
+ - ADIN1100 - Robust, Industrial, Low Power 10BASE-T1L Ethernet PHY
+
config AQUANTIA_PHY
tristate "Aquantia PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b2728d00fc9a..b82651b57043 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -31,6 +31,7 @@ sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
obj-$(CONFIG_ADIN_PHY) += adin.o
+obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AMD_PHY) += amd.o
aquantia-objs += aquantia_main.o
ifdef CONFIG_HWMON
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
new file mode 100644
index 000000000000..b6d139501199
--- /dev/null
+++ b/drivers/net/phy/adin1100.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Driver for Analog Devices Industrial Ethernet T1L PHYs
+ *
+ * Copyright 2020 Analog Devices Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+
+#define PHY_ID_ADIN1100 0x0283bc81
+
+#define ADIN_FORCED_MODE 0x8000
+#define ADIN_FORCED_MODE_EN BIT(0)
+
+#define ADIN_CRSM_SFT_RST 0x8810
+#define ADIN_CRSM_SFT_RST_EN BIT(0)
+
+#define ADIN_CRSM_SFT_PD_CNTRL 0x8812
+#define ADIN_CRSM_SFT_PD_CNTRL_EN BIT(0)
+
+#define ADIN_AN_PHY_INST_STATUS 0x8030
+#define ADIN_IS_CFG_SLV BIT(2)
+#define ADIN_IS_CFG_MST BIT(3)
+
+#define ADIN_CRSM_STAT 0x8818
+#define ADIN_CRSM_SFT_PD_RDY BIT(1)
+#define ADIN_CRSM_SYS_RDY BIT(0)
+
+#define ADIN_MSE_VAL 0x830B
+
+#define ADIN_SQI_MAX 7
+
+struct adin_mse_sqi_range {
+ u16 start;
+ u16 end;
+};
+
+static const struct adin_mse_sqi_range adin_mse_sqi_map[] = {
+ { 0x0A74, 0xFFFF },
+ { 0x084E, 0x0A74 },
+ { 0x0698, 0x084E },
+ { 0x053D, 0x0698 },
+ { 0x0429, 0x053D },
+ { 0x034E, 0x0429 },
+ { 0x02A0, 0x034E },
+ { 0x0000, 0x02A0 },
+};
+
+/**
+ * struct adin_priv - ADIN PHY driver private data
+ * @tx_level_2v4_able: set if the PHY supports 2.4V TX levels (10BASE-T1L)
+ * @tx_level_2v4: set if the PHY requests 2.4V TX levels (10BASE-T1L)
+ * @tx_level_prop_present: set if the TX level is specified in DT
+ */
+struct adin_priv {
+ unsigned int tx_level_2v4_able:1;
+ unsigned int tx_level_2v4:1;
+ unsigned int tx_level_prop_present:1;
+};
+
+static int adin_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_read_status(phydev);
+ if (ret)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, ADIN_AN_PHY_INST_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ADIN_IS_CFG_SLV)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ if (ret & ADIN_IS_CFG_MST)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
+
+ return 0;
+}
+
+static int adin_config_aneg(struct phy_device *phydev)
+{
+ struct adin_priv *priv = phydev->priv;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (priv->tx_level_prop_present && priv->tx_level_2v4)
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL,
+ MDIO_PMA_10T1L_CTRL_2V4_EN);
+ else
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL,
+ MDIO_PMA_10T1L_CTRL_2V4_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Force the PHY to use the above configuration */
+ return phy_set_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN);
+ }
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Request increased transmit level from LP. */
+ if (priv->tx_level_prop_present && priv->tx_level_2v4) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H,
+ MDIO_AN_T1_ADV_H_10L_TX_HI |
+ MDIO_AN_T1_ADV_H_10L_TX_HI_REQ);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Disable 2.4 Vpp transmit level. */
+ if ((priv->tx_level_prop_present && !priv->tx_level_2v4) || !priv->tx_level_2v4_able) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H,
+ MDIO_AN_T1_ADV_H_10L_TX_HI |
+ MDIO_AN_T1_ADV_H_10L_TX_HI_REQ);
+ if (ret < 0)
+ return ret;
+ }
+
+ return genphy_c45_config_aneg(phydev);
+}
+
+static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
+{
+ int ret;
+ int val;
+
+ val = en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN_CRSM_SFT_PD_CNTRL, val);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
+ (ret & ADIN_CRSM_SFT_PD_RDY) == val,
+ 1000, 30000, true);
+}
+
+static int adin_suspend(struct phy_device *phydev)
+{
+ return adin_set_powerdown_mode(phydev, true);
+}
+
+static int adin_resume(struct phy_device *phydev)
+{
+ return adin_set_powerdown_mode(phydev, false);
+}
+
+static int adin_set_loopback(struct phy_device *phydev, bool enable)
+{
+ if (enable)
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
+ BMCR_LOOPBACK);
+
+ /* PCS loopback (according to 10BASE-T1L spec) */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
+ BMCR_LOOPBACK);
+}
+
+static int adin_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN_CRSM_SFT_RST, ADIN_CRSM_SFT_RST_EN);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
+ (ret & ADIN_CRSM_SYS_RDY),
+ 10000, 30000, true);
+}
+
+static int adin_get_features(struct phy_device *phydev)
+{
+ struct adin_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ int ret;
+ u8 val;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT);
+ if (ret < 0)
+ return ret;
+
+ /* This depends on the voltage level from the power source */
+ priv->tx_level_2v4_able = !!(ret & MDIO_PMA_10T1L_STAT_2V4_ABLE);
+
+ phydev_dbg(phydev, "PHY supports 2.4V TX level: %s\n",
+ priv->tx_level_2v4_able ? "yes" : "no");
+
+ priv->tx_level_prop_present = device_property_present(dev, "phy-10base-t1l-2.4vpp");
+ if (priv->tx_level_prop_present) {
+ ret = device_property_read_u8(dev, "phy-10base-t1l-2.4vpp", &val);
+ if (ret < 0)
+ return ret;
+
+ priv->tx_level_2v4 = val;
+ if (!priv->tx_level_2v4 && priv->tx_level_2v4_able)
+ phydev_info(phydev,
+ "PHY supports 2.4V TX level, but disabled via config\n");
+ }
+
+ linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ return genphy_c45_pma_read_abilities(phydev);
+}
+
+static int adin_get_sqi(struct phy_device *phydev)
+{
+ u16 mse_val;
+ int sqi;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+ else if (!(ret & MDIO_STAT1_LSTATUS))
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, ADIN_MSE_VAL);
+ if (ret < 0)
+ return ret;
+
+ mse_val = 0xFFFF & ret;
+ for (sqi = 0; sqi < ARRAY_SIZE(adin_mse_sqi_map); sqi++) {
+ if (mse_val >= adin_mse_sqi_map[sqi].start && mse_val <= adin_mse_sqi_map[sqi].end)
+ return sqi;
+ }
+
+ return -EINVAL;
+}
+
+static int adin_get_sqi_max(struct phy_device *phydev)
+{
+ return ADIN_SQI_MAX;
+}
+
+static int adin_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct adin_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static struct phy_driver adin_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100),
+ .name = "ADIN1100",
+ .get_features = adin_get_features,
+ .soft_reset = adin_soft_reset,
+ .probe = adin_probe,
+ .config_aneg = adin_config_aneg,
+ .read_status = adin_read_status,
+ .set_loopback = adin_set_loopback,
+ .suspend = adin_suspend,
+ .resume = adin_resume,
+ .get_sqi = adin_get_sqi,
+ .get_sqi_max = adin_get_sqi_max,
+ },
+};
+
+module_phy_driver(adin_driver);
+
+static struct mdio_device_id __maybe_unused adin_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, adin_tbl);
+MODULE_DESCRIPTION("Analog Devices Industrial Ethernet T1L PHY driver");
+MODULE_LICENSE("Dual BSD/GPL");
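
For context, a worked example of the MSE-to-SQI lookup in adin_get_sqi(), using an illustrative register value:

/* Illustrative value only: if ADIN_MSE_VAL reads back 0x0700, the loop
 * matches 0x0698 <= 0x0700 <= 0x084E at index 2 of adin_mse_sqi_map[], so
 * the reported SQI is 2 on the 0..ADIN_SQI_MAX (7) scale. A lower mean
 * square error lands at a higher index, i.e. better signal quality, and a
 * link that is down returns SQI 0 before the table is consulted.
 */
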
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 313563482690..cc2858107668 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -10,12 +10,12 @@
#define PHY_ID_BCM8706 0x0143bdc1
#define PHY_ID_BCM8727 0x0143bff0
-#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
-#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
-#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
+#define BCM87XX_PMD_RX_SIGNAL_DETECT 0x000a
+#define BCM87XX_10GBASER_PCS_STATUS 0x0020
+#define BCM87XX_XGXS_LANE_STATUS 0x0018
-#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
-#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
+#define BCM87XX_LASI_CONTROL 0x9002
+#define BCM87XX_LASI_STATUS 0x9005
#if IS_ENABLED(CONFIG_OF_MDIO)
/* Set and/or override some configuration registers based on the
@@ -54,11 +54,10 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
u16 reg = be32_to_cpup(paddr++);
u16 mask = be32_to_cpup(paddr++);
u16 val_bits = be32_to_cpup(paddr++);
- u32 regnum = mdiobus_c45_addr(devid, reg);
int val = 0;
if (mask) {
- val = phy_read(phydev, regnum);
+ val = phy_read_mmd(phydev, devid, reg);
if (val < 0) {
ret = val;
goto err;
@@ -67,7 +66,7 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
}
val |= val_bits;
- ret = phy_write(phydev, regnum, val);
+ ret = phy_write_mmd(phydev, devid, reg, val);
if (ret < 0)
goto err;
}
@@ -104,21 +103,24 @@ static int bcm87xx_read_status(struct phy_device *phydev)
int pcs_status;
int xgxs_lane_status;
- rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
+ rx_signal_detect = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ BCM87XX_PMD_RX_SIGNAL_DETECT);
if (rx_signal_detect < 0)
return rx_signal_detect;
if ((rx_signal_detect & 1) == 0)
goto no_link;
- pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
+ pcs_status = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ BCM87XX_10GBASER_PCS_STATUS);
if (pcs_status < 0)
return pcs_status;
if ((pcs_status & 1) == 0)
goto no_link;
- xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
+ xgxs_lane_status = phy_read_mmd(phydev, MDIO_MMD_PHYXS,
+ BCM87XX_XGXS_LANE_STATUS);
if (xgxs_lane_status < 0)
return xgxs_lane_status;
@@ -139,25 +141,27 @@ static int bcm87xx_config_intr(struct phy_device *phydev)
{
int reg, err;
- reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
+ reg = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_CONTROL);
if (reg < 0)
return reg;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS);
if (err)
return err;
reg |= 1;
- err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ err = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ BCM87XX_LASI_CONTROL, reg);
} else {
reg &= ~1;
- err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ err = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ BCM87XX_LASI_CONTROL, reg);
if (err)
return err;
- err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS);
}
return err;
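
For readers following the conversion in this bcm87xx hunk: the old defines packed the MMD (device address) and the 16-bit register into a single MII_ADDR_C45-tagged value and went through phy_read()/phy_write(), whereas the new code keeps only the register number and passes the MMD explicitly to phy_read_mmd()/phy_write_mmd(). A minimal sketch with a hypothetical VENDOR_LASI_CTRL register in the PCS MMD:

#include <linux/mdio.h>
#include <linux/phy.h>

#define VENDOR_LASI_CTRL        0x9002  /* hypothetical PCS-MMD register */

/* Old style: devad folded into the register address word. */
static int vendor_lasi_read_old(struct phy_device *phydev)
{
        return phy_read(phydev, MII_ADDR_C45 |
                        (MDIO_MMD_PCS << 16) | VENDOR_LASI_CTRL);
}

/* New style: the MMD is a separate argument of the *_mmd accessors. */
static int vendor_lasi_read_new(struct phy_device *phydev)
{
        return phy_read_mmd(phydev, MDIO_MMD_PCS, VENDOR_LASI_CTRL);
}
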
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2702faf7b0f6..47e83c1e9051 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -961,7 +961,21 @@ static int m88e1111_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- return genphy_soft_reset(phydev);
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* If the HWCFG_MODE was changed from another mode (such as
+ * 1000BaseX) to SGMII, the state of the support bits may have
+ * also changed now that the PHY has been reset.
+ * Update the PHY abilities accordingly.
+ */
+ err = genphy_read_abilities(phydev);
+ linkmode_or(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ }
+ return err;
}
static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
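
A note on the linkmode handling in the SGMII branch above: phydev->supported and phydev->advertising are plain bitmaps, so widening the advertisement after the abilities have been re-read is just a bitmap OR. A tiny sketch of the same helpers (the specific bit used here is only illustrative; m88e1111_config_init() ORs in the whole freshly read phydev->supported mask):

#include <linux/linkmode.h>
#include <linux/phy.h>

static void widen_advertising(struct phy_device *phydev)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(extra) = { 0, };

        /* Example bit only. */
        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, extra);
        linkmode_or(phydev->advertising, phydev->advertising, extra);
}
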
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fc53b71dc872..685a0ab5453c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -32,6 +32,7 @@
#include <linux/ptp_clock.h>
#include <linux/ptp_classify.h>
#include <linux/net_tstamp.h>
+#include <linux/gpio/consumer.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -70,6 +71,27 @@
#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+#define KSZ9x31_LMD 0x12
+#define KSZ9x31_LMD_VCT_EN BIT(15)
+#define KSZ9x31_LMD_VCT_DIS_TX BIT(14)
+#define KSZ9x31_LMD_VCT_PAIR(n) (((n) & 0x3) << 12)
+#define KSZ9x31_LMD_VCT_SEL_RESULT 0
+#define KSZ9x31_LMD_VCT_SEL_THRES_HI BIT(10)
+#define KSZ9x31_LMD_VCT_SEL_THRES_LO BIT(11)
+#define KSZ9x31_LMD_VCT_SEL_MASK GENMASK(11, 10)
+#define KSZ9x31_LMD_VCT_ST_NORMAL 0
+#define KSZ9x31_LMD_VCT_ST_OPEN 1
+#define KSZ9x31_LMD_VCT_ST_SHORT 2
+#define KSZ9x31_LMD_VCT_ST_FAIL 3
+#define KSZ9x31_LMD_VCT_ST_MASK GENMASK(9, 8)
+#define KSZ9x31_LMD_VCT_DATA_REFLECTED_INVALID BIT(7)
+#define KSZ9x31_LMD_VCT_DATA_SIG_WAIT_TOO_LONG BIT(6)
+#define KSZ9x31_LMD_VCT_DATA_MASK100 BIT(5)
+#define KSZ9x31_LMD_VCT_DATA_NLP_FLP BIT(4)
+#define KSZ9x31_LMD_VCT_DATA_LO_PULSE_MASK GENMASK(3, 2)
+#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0)
+#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0)
+
/* Lan8814 general Interrupt control/status reg in GPHY specific block. */
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
@@ -280,6 +302,7 @@ struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
int led_mode;
+ u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
@@ -1326,6 +1349,199 @@ static int ksz9031_read_status(struct phy_device *phydev)
return 0;
}
+static int ksz9x31_cable_test_start(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * Prior to running the cable diagnostics, Auto-negotiation should
+ * be disabled, full duplex set and the link speed set to 1000Mbps
+ * via the Basic Control Register.
+ */
+ ret = phy_modify(phydev, MII_BMCR,
+ BMCR_SPEED1000 | BMCR_FULLDPLX |
+ BMCR_ANENABLE | BMCR_SPEED100,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (ret)
+ return ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * The Master-Slave configuration should be set to Slave by writing
+ * a value of 0x1000 to the Auto-Negotiation Master Slave Control
+ * Register.
+ */
+ ret = phy_read(phydev, MII_CTRL1000);
+ if (ret < 0)
+ return ret;
+
+ /* Cache these bits; they need to be restored once LinkMD finishes. */
+ priv->vct_ctrl1000 = ret & (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret &= ~(CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret |= CTL1000_ENABLE_MASTER;
+
+ return phy_write(phydev, MII_CTRL1000, ret);
+}
+
+static int ksz9x31_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case KSZ9x31_LMD_VCT_ST_FAIL:
+ fallthrough;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool ksz9x31_cable_test_failed(u16 status)
+{
+ int stat = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status);
+
+ return stat == KSZ9x31_LMD_VCT_ST_FAIL;
+}
+
+static bool ksz9x31_cable_test_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ fallthrough;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat)
+{
+ int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, stat);
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ *
+ * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity
+ */
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131)
+ dt = clamp(dt - 22, 0, 255);
+
+ return (dt * 400) / 10;
+}
+
+static int ksz9x31_cable_test_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = phy_read_poll_timeout(phydev, KSZ9x31_LMD, val,
+ !(val & KSZ9x31_LMD_VCT_EN),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ksz9x31_cable_test_get_pair(int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+
+ return ethtool_pair[pair];
+}
+
+static int ksz9x31_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ int ret, val;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * To test each individual cable pair, set the cable pair in the Cable
+ * Diagnostics Test Pair (VCT_PAIR[1:0]) field of the LinkMD Cable
+ * Diagnostic Register, along with setting the Cable Diagnostics Test
+ * Enable (VCT_EN) bit. The Cable Diagnostics Test Enable (VCT_EN) bit
+ * will self clear when the test is concluded.
+ */
+ ret = phy_write(phydev, KSZ9x31_LMD,
+ KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(pair));
+ if (ret)
+ return ret;
+
+ ret = ksz9x31_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, KSZ9x31_LMD);
+ if (val < 0)
+ return val;
+
+ if (ksz9x31_cable_test_failed(val))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_result_trans(val));
+ if (ret)
+ return ret;
+
+ if (!ksz9x31_cable_test_fault_length_valid(val))
+ return 0;
+
+ return ethnl_cable_test_fault_length(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_fault_length(phydev, val));
+}
+
+static int ksz9x31_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ unsigned long pair_mask = 0xf;
+ int retries = 20;
+ int pair, ret, rv;
+
+ *finished = false;
+
+ /* Try harder if link partner is active */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ksz9x31_cable_test_one_pair(phydev, pair);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+ clear_bit(pair, &pair_mask);
+ }
+ /* If the link partner is in autonegotiation mode, it will send 2ms
+ * of FLPs followed by at least 6ms of silence.
+ * Sleep for 2ms to improve the chances of hitting this silence.
+ */
+ if (pair_mask)
+ usleep_range(2000, 3000);
+ }
+
+ /* Report remaining unfinished pair result as unknown. */
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ *finished = true;
+
+ /* Restore cached bits from before LinkMD got started. */
+ rv = phy_modify(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER,
+ priv->vct_ctrl1000);
+ if (rv)
+ return rv;
+
+ return ret;
+}
+
static int ksz8873mll_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -2514,6 +2730,10 @@ static void lan8814_ptp_init(struct phy_device *phydev)
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
u32 temp;
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return;
+
lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
@@ -2552,6 +2772,10 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
{
struct lan8814_shared_priv *shared = phydev->shared->priv;
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return 0;
+
 /* Initialise the shared lock for the clock */
mutex_init(&shared->shared_lock);
@@ -2614,6 +2838,21 @@ static int lan8814_config_init(struct phy_device *phydev)
return 0;
}
+static int lan8814_release_coma_mode(struct phy_device *phydev)
+{
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
+ GPIOD_OUT_HIGH_OPEN_DRAIN);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod_set_consumer_name(gpiod, "LAN8814 coma mode");
+ gpiod_set_value_cansleep(gpiod, 0);
+
+ return 0;
+}
+
static int lan8814_probe(struct phy_device *phydev)
{
struct kszphy_priv *priv;
@@ -2628,10 +2867,6 @@ static int lan8814_probe(struct phy_device *phydev)
phydev->priv = priv;
- if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
- !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
- return 0;
-
 /* Strap-in value for PHY address; the register read below gives the
 * starting PHY address value
*/
@@ -2640,6 +2875,10 @@ static int lan8814_probe(struct phy_device *phydev)
addr, sizeof(struct lan8814_shared_priv));
if (phy_package_init_once(phydev)) {
+ err = lan8814_release_coma_mode(phydev);
+ if (err)
+ return err;
+
err = lan8814_ptp_probe_once(phydev);
if (err)
return err;
@@ -2806,6 +3045,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.get_features = ksz9031_get_features,
@@ -2819,6 +3059,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -2853,6 +3095,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -2863,6 +3106,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
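
The LinkMD support added above works per pair: force 1000BT full duplex, force the PHY to master, set VCT_EN for the pair, poll until the bit self-clears, then decode the result word. A minimal decode sketch over a captured KSZ9x31_LMD value, reusing the masks defined earlier in this patch (the distance arithmetic mirrors ksz9x31_cable_test_fault_length() for the KSZ9131):

#include <linux/bitfield.h>
#include <linux/minmax.h>
#include <linux/phy.h>

static void ksz9x31_decode_lmd(struct phy_device *phydev, u16 lmd)
{
        u16 st = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, lmd);
        int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, lmd);

        switch (st) {
        case KSZ9x31_LMD_VCT_ST_NORMAL:
                phydev_info(phydev, "pair OK\n");
                break;
        case KSZ9x31_LMD_VCT_ST_OPEN:
        case KSZ9x31_LMD_VCT_ST_SHORT:
                /* Same arithmetic as the driver: subtract the constant
                 * offset, then scale to centimetres.
                 */
                dt = clamp(dt - 22, 0, 255);
                phydev_info(phydev, "%s at ~%d cm\n",
                            st == KSZ9x31_LMD_VCT_ST_OPEN ? "open" : "short",
                            (dt * 400) / 10);
                break;
        default:
                phydev_info(phydev, "diagnostic failed\n");
                break;
        }
}

With PHY_POLL_CABLE_TEST set and the cable_test_start/cable_test_get_status hooks wired up as in the ksphy_driver[] entries above, the test is normally triggered from user space with "ethtool --cable-test <iface>".
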
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index c2c0e361fd3d..d4c93d59bc53 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -68,7 +68,12 @@
#define T1_POST_LCK_MUFACT_CFG_REG 0x1C
#define T1_TX_RX_FIFO_CFG_REG 0x02
#define T1_TX_LPF_FIR_CFG_REG 0x55
+#define T1_COEF_CLK_PWR_DN_CFG 0x04
+#define T1_COEF_RW_CTL_CFG 0x0D
#define T1_SQI_CONFIG_REG 0x2E
+#define T1_SQI_CONFIG2_REG 0x4A
+#define T1_DCQ_SQI_REG 0xC3
+#define T1_DCQ_SQI_MSK GENMASK(3, 1)
#define T1_MDIO_CONTROL2_REG 0x10
#define T1_INTERRUPT_SOURCE_REG 0x18
#define T1_INTERRUPT2_SOURCE_REG 0x08
@@ -82,6 +87,9 @@
#define T1_MODE_STAT_REG 0x11
#define T1_LINK_UP_MSK BIT(0)
+/* SQI defines */
+#define LAN87XX_MAX_SQI 0x07
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
@@ -346,9 +354,20 @@ static int lan87xx_phy_init(struct phy_device *phydev)
T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 },
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ /* Setup SQI measurement */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_CLK_PWR_DN_CFG, 0x16d6, 0 },
/* SQI enable */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_SQI_CONFIG_REG, 0x9572, 0 },
+ /* SQI select mode 5 */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SQI_CONFIG2_REG, 0x0001, 0 },
+ /* Throw away the first SQI reading */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_RW_CTL_CFG, 0x0301, 0 },
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP,
+ T1_DCQ_SQI_REG, 0, 0 },
/* Flag LPS and WUR as idle errors */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
T1_MDIO_CONTROL2_REG, 0x0014, 0 },
@@ -724,6 +743,31 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
}
+static int lan87xx_get_sqi(struct phy_device *phydev)
+{
+ u8 sqi_value = 0;
+ int rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_DSP, T1_COEF_RW_CTL_CFG, 0x0301);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, T1_DCQ_SQI_REG, 0x0);
+ if (rc < 0)
+ return rc;
+
+ sqi_value = FIELD_GET(T1_DCQ_SQI_MSK, rc);
+
+ return sqi_value;
+}
+
+static int lan87xx_get_sqi_max(struct phy_device *phydev)
+{
+ return LAN87XX_MAX_SQI;
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
@@ -737,6 +781,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.resume = genphy_resume,
.config_aneg = lan87xx_config_aneg,
.read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
},
@@ -746,10 +792,14 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.flags = PHY_POLL_CABLE_TEST,
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
+ .config_intr = lan87xx_phy_config_intr,
+ .handle_interrupt = lan87xx_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = lan87xx_config_aneg,
.read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
}
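
The LAN87xx SQI path above writes T1_COEF_RW_CTL_CFG to latch a fresh measurement, reads T1_DCQ_SQI_REG and extracts a 3-bit quality figure from bits 3:1. The extraction itself is a one-liner; a small sketch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define T1_DCQ_SQI_MSK  GENMASK(3, 1)

/* A 3-bit field keeps the result within 0..LAN87XX_MAX_SQI (7), which is
 * what lan87xx_get_sqi_max() reports.
 */
static inline u8 sqi_from_dcq_reg(u16 reg)
{
        return FIELD_GET(T1_DCQ_SQI_MSK, reg);
}

Once get_sqi/get_sqi_max are registered, user space should see the value through ethtool's SQI reporting for the link.
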
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index db709d30bf84..eefdd67d5556 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -9,6 +9,25 @@
#include <linux/phy.h>
/**
+ * genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities
+ * @phydev: target phy_device struct
+ */
+static bool genphy_c45_baset1_able(struct phy_device *phydev)
+{
+ int val;
+
+ if (phydev->pma_extable == -ENODATA) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
+ if (val < 0)
+ return false;
+
+ phydev->pma_extable = val;
+ }
+
+ return !!(phydev->pma_extable & MDIO_PMA_EXTABLE_BT1);
+}
+
+/**
* genphy_c45_pma_can_sleep - checks if the PMA have sleep support
* @phydev: target phy_device struct
*/
@@ -80,7 +99,10 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
switch (phydev->speed) {
case SPEED_10:
- ctrl2 |= MDIO_PMA_CTRL2_10BT;
+ if (genphy_c45_baset1_able(phydev))
+ ctrl2 |= MDIO_PMA_CTRL2_BASET1;
+ else
+ ctrl2 |= MDIO_PMA_CTRL2_10BT;
break;
case SPEED_100:
ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
@@ -118,10 +140,95 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
if (ret < 0)
return ret;
+ if (genphy_c45_baset1_able(phydev)) {
+ int ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl = MDIO_PMA_PMD_BT1_CTRL_CFG_MST;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ break;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST, ctl);
+ if (ret < 0)
+ return ret;
+ }
+
return genphy_c45_an_disable_aneg(phydev);
}
EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
+/* Sets master/slave preference and supported technologies.
+ * The preference is set in BIT(4) of the BASE-T1 AN
+ * advertisement register 7.515, and whether that preference
+ * is forced or not is set in BIT(12) of the BASE-T1 AN
+ * advertisement register 7.514.
+ * The 10BASE-T1L Ability bit, BIT(14), is set in the BASE-T1
+ * autonegotiation advertisement register [31:16] if supported.
+ */
+static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
+{
+ int changed = 0;
+ u16 adv_l = 0;
+ u16 adv_m = 0;
+ int ret;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS;
+ break;
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ break;
+ default:
+ break;
+ }
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ break;
+ default:
+ break;
+ }
+
+ adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L,
+ (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP
+ | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = 1;
+
+ adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M,
+ MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = 1;
+
+ return changed;
+}
+
/**
* genphy_c45_an_config_aneg - configure advertisement registers
* @phydev: target phy_device struct
@@ -141,6 +248,9 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev)
changed = genphy_config_eee_advert(phydev);
+ if (genphy_c45_baset1_able(phydev))
+ return genphy_c45_baset1_an_config_aneg(phydev);
+
adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
@@ -178,8 +288,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg);
*/
int genphy_c45_an_disable_aneg(struct phy_device *phydev)
{
+ u16 reg = MDIO_CTRL1;
- return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_CTRL;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
}
EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
@@ -194,7 +308,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
*/
int genphy_c45_restart_aneg(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ u16 reg = MDIO_CTRL1;
+
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_CTRL;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_AN, reg,
MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
}
EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
@@ -210,11 +329,15 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
*/
int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart)
{
+ u16 reg = MDIO_CTRL1;
int ret;
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_CTRL;
+
if (!restart) {
/* Configure and restart aneg if it wasn't set before */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
if (ret < 0)
return ret;
@@ -242,7 +365,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg);
*/
int genphy_c45_aneg_done(struct phy_device *phydev)
{
- int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ int reg = MDIO_STAT1;
+ int val;
+
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_STAT;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
return val < 0 ? val : val & MDIO_AN_STAT1_COMPLETE ? 1 : 0;
}
@@ -307,6 +436,49 @@ int genphy_c45_read_link(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(genphy_c45_read_link);
+/* Read the Clause 45 defined BASE-T1 AN (7.513) status register to check
+ * if autoneg is complete. If so read the BASE-T1 Autonegotiation
+ * Advertisement registers filling in the link partner advertisement,
+ * pause and asym_pause members in phydev.
+ */
+static int genphy_c45_baset1_read_lpa(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT);
+ if (val < 0)
+ return val;
+
+ if (!(val & MDIO_AN_STAT1_COMPLETE)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising);
+ mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, 0);
+ mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, 0);
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+ }
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising, 1);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_L);
+ if (val < 0)
+ return val;
+
+ mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, val);
+ phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM ? 1 : 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_M);
+ if (val < 0)
+ return val;
+
+ mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, val);
+
+ return 0;
+}
+
/**
* genphy_c45_read_lpa - read the link partner advertisement and pause
* @phydev: target phy_device struct
@@ -321,6 +493,9 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
{
int val;
+ if (genphy_c45_baset1_able(phydev))
+ return genphy_c45_baset1_read_lpa(phydev);
+
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
if (val < 0)
return val;
@@ -399,6 +574,17 @@ int genphy_c45_read_pma(struct phy_device *phydev)
phydev->duplex = DUPLEX_FULL;
+ if (genphy_c45_baset1_able(phydev)) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_PMA_PMD_BT1_CTRL_CFG_MST)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
+ else
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
@@ -530,12 +716,67 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev)
phydev->supported,
val & MDIO_PMA_NG_EXTABLE_5GBT);
}
+
+ if (val & MDIO_PMA_EXTABLE_BT1) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_PMD_BT1_B10L_ABLE);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported,
+ val & MDIO_AN_STAT1_ABLE);
+ }
}
return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities);
+/* Read master/slave preference from registers.
+ * The preference is read from BIT(4) of the BASE-T1 AN
+ * advertisement register 7.515, and whether that preference
+ * is forced or not is read from BIT(12) of the BASE-T1 AN
+ * advertisement register 7.514.
+ */
+static int genphy_c45_baset1_read_status(struct phy_device *phydev)
+{
+ int ret;
+ int cfg;
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L);
+ if (ret < 0)
+ return ret;
+
+ cfg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M);
+ if (cfg < 0)
+ return cfg;
+
+ if (ret & MDIO_AN_T1_ADV_L_FORCE_MS) {
+ if (cfg & MDIO_AN_T1_ADV_M_MST)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ } else {
+ if (cfg & MDIO_AN_T1_ADV_M_MST)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_PREFERRED;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_PREFERRED;
+ }
+
+ return 0;
+}
+
/**
* genphy_c45_read_status - read PHY status
* @phydev: target phy_device struct
@@ -560,6 +801,12 @@ int genphy_c45_read_status(struct phy_device *phydev)
if (ret)
return ret;
+ if (genphy_c45_baset1_able(phydev)) {
+ ret = genphy_c45_baset1_read_status(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
phy_resolve_aneg_linkmode(phydev);
} else {
ret = genphy_c45_read_pma(phydev);
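
genphy_c45_baset1_able() above uses a read-once cache: the first caller reads MDIO_PMA_EXTABLE and stores the raw value in phydev->pma_extable (pre-set to -ENODATA by phy_device_create() in the phy_device.c hunk below), and every later BASE-T1 branch decision is answered from that cache. A sketch of the same pattern with the cache made an explicit argument, to keep the flow visible:

#include <linux/errno.h>
#include <linux/mdio.h>
#include <linux/phy.h>

/* 'cache' stands in for phydev->pma_extable; -ENODATA means "not read yet". */
static bool pma_has_bt1(struct phy_device *phydev, int *cache)
{
        if (*cache == -ENODATA) {
                int val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
                                       MDIO_PMA_EXTABLE);

                if (val < 0)
                        return false;   /* read error: report "not able" */

                *cache = val;
        }

        return !!(*cache & MDIO_PMA_EXTABLE_BT1);
}
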
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 2001f3329133..1f2531a1a876 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -13,7 +13,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 93,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -176,6 +176,7 @@ static const struct phy_setting settings[] = {
/* 10M */
PHY_SETTING( 10, FULL, 10baseT_Full ),
PHY_SETTING( 10, HALF, 10baseT_Half ),
+ PHY_SETTING( 10, FULL, 10baseT1L_Full ),
};
#undef PHY_SETTING
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index beb2b66da132..9034c6a8e18f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -295,20 +295,20 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = mdiobus_c45_addr(devad, mii_data->reg_num);
+ mii_data->val_out = mdiobus_c45_read(
+ phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num);
} else {
- prtad = mii_data->phy_id;
- devad = mii_data->reg_num;
+ mii_data->val_out = mdiobus_read(
+ phydev->mdio.bus, mii_data->phy_id,
+ mii_data->reg_num);
}
- mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad,
- devad);
return 0;
case SIOCSMIIREG:
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = mdiobus_c45_addr(devad, mii_data->reg_num);
} else {
prtad = mii_data->phy_id;
devad = mii_data->reg_num;
@@ -351,7 +351,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
}
- mdiobus_write(phydev->mdio.bus, prtad, devad, val);
+ if (mdio_phy_id_is_c45(mii_data->phy_id))
+ mdiobus_c45_write(phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num, val);
+ else
+ mdiobus_write(phydev->mdio.bus, prtad, devad, val);
if (prtad == phydev->mdio.addr &&
devad == MII_BMCR &&
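
The SIOCGMIIREG/SIOCSMIIREG paths above now dispatch on mdio_phy_id_is_c45() and call the dedicated C45 bus accessors instead of packing devad and regnum into one address with mdiobus_c45_addr(). A condensed sketch of the read-side dispatch:

#include <linux/mdio.h>
#include <linux/phy.h>

/* For a C45 request, phy_id carries both the port (prtad) and device (devad)
 * addresses; for C22 it is just the 5-bit PHY address.
 */
static int mii_ioctl_read(struct mii_bus *bus, int phy_id, int regnum)
{
        if (mdio_phy_id_is_c45(phy_id))
                return mdiobus_c45_read(bus, mdio_phy_id_prtad(phy_id),
                                        mdio_phy_id_devad(phy_id), regnum);

        return mdiobus_read(bus, phy_id, regnum);
}
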
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8406ac739def..431a8719c635 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -90,8 +90,9 @@ const int phy_10_100_features_array[4] = {
};
EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-const int phy_basic_t1_features_array[2] = {
+const int phy_basic_t1_features_array[3] = {
ETHTOOL_LINK_MODE_TP_BIT,
+ ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
@@ -599,6 +600,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
dev->autoneg = AUTONEG_ENABLE;
+ dev->pma_extable = -ENODATA;
dev->is_c45 = is_c45;
dev->phy_id = phy_id;
if (c45_ids)
@@ -1449,6 +1451,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->state = PHY_READY;
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1471,10 +1475,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (err)
goto error;
- err = phy_disable_interrupts(phydev);
- if (err)
- return err;
-
phy_resume(phydev);
phy_led_triggers_register(phydev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 06943889d747..066684b80919 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -168,8 +168,10 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
if (caps & MAC_10HD)
__set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
- if (caps & MAC_10FD)
+ if (caps & MAC_10FD) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes);
+ }
if (caps & MAC_100HD) {
__set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
@@ -2301,8 +2303,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id,
if (mdio_phy_id_is_c45(phy_id)) {
prtad = mdio_phy_id_prtad(phy_id);
devad = mdio_phy_id_devad(phy_id);
- devad = mdiobus_c45_addr(devad, reg);
- } else if (phydev->is_c45) {
+ return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad,
+ reg);
+ }
+
+ if (phydev->is_c45) {
switch (reg) {
case MII_BMCR:
case MII_BMSR:
@@ -2324,12 +2329,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id,
return -EINVAL;
}
prtad = phy_id;
- devad = mdiobus_c45_addr(devad, reg);
- } else {
- prtad = phy_id;
- devad = reg;
+ return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad,
+ reg);
}
- return mdiobus_read(pl->phydev->mdio.bus, prtad, devad);
+
+ return mdiobus_read(pl->phydev->mdio.bus, phy_id, reg);
}
static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
@@ -2341,8 +2345,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
if (mdio_phy_id_is_c45(phy_id)) {
prtad = mdio_phy_id_prtad(phy_id);
devad = mdio_phy_id_devad(phy_id);
- devad = mdiobus_c45_addr(devad, reg);
- } else if (phydev->is_c45) {
+ return mdiobus_c45_write(pl->phydev->mdio.bus, prtad, devad,
+ reg, val);
+ }
+
+ if (phydev->is_c45) {
switch (reg) {
case MII_BMCR:
case MII_BMSR:
@@ -2363,14 +2370,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
default:
return -EINVAL;
}
- prtad = phy_id;
- devad = mdiobus_c45_addr(devad, reg);
- } else {
- prtad = phy_id;
- devad = reg;
+ return mdiobus_c45_write(pl->phydev->mdio.bus, phy_id, devad,
+ reg, val);
}
- return mdiobus_write(phydev->mdio.bus, prtad, devad, val);
+ return mdiobus_write(phydev->mdio.bus, phy_id, reg, val);
}
static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
@@ -2778,34 +2782,6 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
/* Helpers for MAC drivers */
-/**
- * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper
- * @state: a pointer to a &struct phylink_link_state
- *
- * Inspect the interface mode, advertising mask or forced speed and
- * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching
- * the interface mode to suit. @state->interface is appropriately
- * updated, and the advertising mask has the "other" baseX_Full flag
- * cleared.
- */
-void phylink_helper_basex_speed(struct phylink_link_state *state)
-{
- if (phy_interface_mode_is_8023z(state->interface)) {
- bool want_2500 = state->an_enabled ?
- phylink_test(state->advertising, 2500baseX_Full) :
- state->speed == SPEED_2500;
-
- if (want_2500) {
- phylink_clear(state->advertising, 1000baseX_Full);
- state->interface = PHY_INTERFACE_MODE_2500BASEX;
- } else {
- phylink_clear(state->advertising, 2500baseX_Full);
- state->interface = PHY_INTERFACE_MODE_1000BASEX;
- }
- }
-}
-EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
-
static void phylink_decode_c37_word(struct phylink_link_state *state,
uint16_t config_reg, int speed)
{
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3619520340b7..1b41cd9732d7 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1011,8 +1011,7 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
goto end;
}
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &error);
+ skb = skb_recv_datagram(sk, flags, &error);
if (error < 0)
goto end;
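
The pppoe hunk reflects the tree-wide skb_recv_datagram() signature change: the separate 'noblock' argument is gone and MSG_DONTWAIT now simply stays inside 'flags'. A minimal receive-path sketch against the new signature (names are illustrative):

#include <linux/skbuff.h>
#include <net/sock.h>

/* 'sk' and 'flags' as handed to a ->recvmsg() implementation. */
static struct sk_buff *example_recv_one(struct sock *sk, int flags, int *err)
{
        /* No more "flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT" split;
         * the helper looks at MSG_DONTWAIT in 'flags' itself.
         */
        return skb_recv_datagram(sk, flags, err);
}
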
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9b4dfa3001d6..2de09ad5bac0 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -479,7 +479,7 @@ static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf)
* device MAC address has been updated). Always set MAC address to that of the
* device.
*/
-static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02))
return 1;
@@ -489,6 +489,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
+EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup);
/* Ensure correct link state
*
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 15f91d691bba..cdca00c0dc1f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1492,19 +1492,19 @@ static void cdc_ncm_txpath_bh(struct tasklet_struct *t)
struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh);
struct usbnet *dev = ctx->dev;
- spin_lock_bh(&ctx->mtx);
+ spin_lock(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
} else if (dev->net != NULL) {
ctx->tx_reason_timeout++; /* count reason for transmitting */
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
netif_tx_unlock_bh(dev->net);
} else {
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
}
}
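
The cdc_ncm change above drops the _bh lock variants inside the TX tasklet: the tasklet already runs with bottom halves disabled, so plain spin_lock() is enough there, while process-context users of the same lock keep using spin_lock_bh(). A locking sketch with a hypothetical context structure mirroring struct cdc_ncm_ctx:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_ctx {
        struct tasklet_struct bh;
        spinlock_t mtx;
};

static void example_txpath_bh(struct tasklet_struct *t)
{
        struct example_ctx *ctx = from_tasklet(ctx, t, bh);

        spin_lock(&ctx->mtx);           /* already in BH context */
        /* ... queueing decisions ... */
        spin_unlock(&ctx->mtx);
}

static void example_start_xmit_path(struct example_ctx *ctx)
{
        spin_lock_bh(&ctx->mtx);        /* process context disables BHs so the
                                         * tasklet cannot interrupt it while
                                         * the lock is held
                                         */
        /* ... */
        spin_unlock_bh(&ctx->mtx);
}
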
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 415f16662f88..94e571fb61da 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -92,8 +92,6 @@
WAKE_MCAST | WAKE_BCAST | \
WAKE_ARP | WAKE_MAGIC)
-#define LAN78XX_NAPI_WEIGHT 64
-
#define TX_URB_NUM 10
#define TX_SS_URB_NUM TX_URB_NUM
#define TX_HS_URB_NUM TX_URB_NUM
@@ -4376,7 +4374,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
- netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT);
+ netif_napi_add(netdev, &dev->napi, lan78xx_poll, NAPI_POLL_WEIGHT);
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
init_usb_anchor(&dev->deferred);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3353e761016d..79f8bd849b1a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -190,7 +190,6 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
if (!skbn)
return 0;
- skbn->dev = net;
switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
@@ -1351,6 +1350,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0xc081, 8)}, /* Sierra Wireless EM7590 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 247f58cb0f84..4e70dec30e5a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,10 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
goto halt_fail_and_release;
}
- if (bp[0] & 0x02)
- eth_hw_addr_random(net);
- else
- eth_hw_addr_set(net, bp);
+ eth_hw_addr_set(net, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
@@ -463,6 +460,16 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS);
}
+static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status = rndis_bind(dev, intf);
+
+ if (!status && (dev->net->dev_addr[0] & 0x02))
+ eth_hw_addr_random(dev->net);
+
+ return status;
+}
+
void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct rndis_halt *halt;
@@ -485,10 +492,14 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ bool dst_mac_fixup;
+
/* This check is no longer done by usbnet */
if (skb->len < dev->net->hard_header_len)
return 0;
+ dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP);
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
@@ -523,10 +534,17 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
skb_pull(skb, msg_len - sizeof *hdr);
skb_trim(skb2, data_len);
+
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb2);
+
usbnet_skb_return(dev, skb2);
}
/* caller will usbnet_skb_return the remaining packet */
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb);
+
return 1;
}
EXPORT_SYMBOL_GPL(rndis_rx_fixup);
@@ -600,6 +618,17 @@ static const struct driver_info rndis_poll_status_info = {
.tx_fixup = rndis_tx_fixup,
};
+static const struct driver_info zte_rndis_info = {
+ .description = "ZTE RNDIS device",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+ .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP,
+ .bind = zte_rndis_bind,
+ .unbind = rndis_unbind,
+ .status = rndis_status,
+ .rx_fixup = rndis_rx_fixup,
+ .tx_fixup = rndis_tx_fixup,
+};
+
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -614,6 +643,16 @@ static const struct usb_device_id products [] = {
USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long)&rndis_info,
}, {
+ /* ZTE WWAN modules */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
+ /* ZTE WWAN modules, ACM flavour */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_info,
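
zte_rndis_info above reuses usbnet_cdc_zte_rx_fixup(), now exported from cdc_ether, to repair received frames whose destination MAC was mangled by the ZTE firmware, and zte_rndis_bind() additionally randomizes the interface MAC when the device itself reports an address with the locally-administered bit set. A sketch of the check both paths key on:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* True when the frame is long enough to carry an Ethernet header and its
 * destination address has the locally-administered bit set (data[0] & 0x02);
 * this is the condition under which the ZTE fixup rewrites the destination
 * MAC to the interface's own address.
 */
static bool zte_dst_mac_needs_fixup(const struct sk_buff *skb)
{
        return skb->len >= ETH_HLEN && (skb->data[0] & 0x02);
}
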
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
index 18f670251275..952e6f7c0321 100644
--- a/drivers/net/usb/sr9800.h
+++ b/drivers/net/usb/sr9800.h
@@ -163,7 +163,7 @@
#define SR9800_MAX_BULKIN_24K 6
#define SR9800_MAX_BULKIN_32K 7
-struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+static const struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
/* 2k */
{2048, 0x8000, 0x8001},
/* 4k */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index eb0121a64d6d..3592014e50cc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1375,7 +1375,7 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
int i;
- priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
+ priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
if (!priv->rq)
return -ENOMEM;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 140780ac1745..dcb069dde66b 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -23,78 +23,6 @@ menuconfig WAN
if WAN
-# There is no way to detect a comtrol sv11 - force it modular for now.
-config HOSTESS_SV11
- tristate "Comtrol Hostess SV-11 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- Driver for Comtrol Hostess SV-11 network card which
- operates on low speed synchronous serial links at up to
- 256Kbps, supporting PPP and Cisco HDLC.
-
- The driver will be compiled as a module: the
- module will be called hostess_sv11.
-
-# The COSA/SRP driver has not been tested as non-modular yet.
-config COSA
- tristate "COSA/SRP sync serial boards support"
- depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
- help
- Driver for COSA and SRP synchronous serial boards.
-
- These boards allow to connect synchronous serial devices (for example
- base-band modems, or any other device with the X.21, V.24, V.35 or
- V.36 interface) to your Linux box. The cards can work as the
- character device, synchronous PPP network device, or the Cisco HDLC
- network device.
-
- You will need user-space utilities COSA or SRP boards for downloading
- the firmware to the cards and to set them up. Look at the
- <http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
- read the comment at the top of the <file:drivers/net/wan/cosa.c> for
- details about the cards and the driver itself.
-
- The driver will be compiled as a module: the
- module will be called cosa.
-
-#
-# Lan Media's board. Currently 1000, 1200, 5200, 5245
-#
-config LANMEDIA
- tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
- depends on PCI && VIRT_TO_BUS && HDLC
- help
- Driver for the following Lan Media family of serial boards:
-
- - LMC 1000 board allows you to connect synchronous serial devices
- (for example base-band modems, or any other device with the X.21,
- V.24, V.35 or V.36 interface) to your Linux box.
-
- - LMC 1200 with on board DSU board allows you to connect your Linux
- box directly to a T1 or E1 circuit.
-
- - LMC 5200 board provides a HSSI interface capable of running up to
- 52 Mbits per second.
-
- - LMC 5245 board connects directly to a T3 circuit saving the
- additional external hardware.
-
- To change setting such as clock source you will need lmcctl.
- It is available at <ftp://ftp.lanmedia.com/> (broken link).
-
- To compile this driver as a module, choose M here: the
- module will be called lmc.
-
-# There is no way to detect a Sealevel board. Force it modular
-config SEALEVEL_4021
- tristate "Sealevel Systems 4021 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
-
- The driver will be compiled as a module: the
- module will be called sealevel.
-
# Generic HDLC
config HDLC
tristate "Generic HDLC layer"
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 480bcd1f6c1c..5bec8fae47f8 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,13 +14,8 @@ obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
-obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
-obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
-obj-$(CONFIG_COSA) += cosa.o
obj-$(CONFIG_FARSYNC) += farsync.o
-obj-$(CONFIG_LANMEDIA) += lmc/
-
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_N2) += n2.o
obj-$(CONFIG_C101) += c101.o
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
deleted file mode 100644
index 1e5672019922..000000000000
--- a/drivers/net/wan/cosa.c
+++ /dev/null
@@ -1,2052 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
-
-/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-/* The driver for the SRP and COSA synchronous serial cards.
- *
- * HARDWARE INFO
- *
- * Both cards are developed at the Institute of Computer Science,
- * Masaryk University (https://www.ics.muni.cz/). The hardware is
- * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
- * and the photo of both cards is available at
- * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
- * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/.
- * For Linux-specific utilities, see below in the "Software info" section.
- * If you want to order the card, contact Jiri Novotny.
- *
- * The SRP (serial port?, the Czech word "srp" means "sickle") card
- * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card
- * with V.24 interfaces up to 80kb/s each.
- *
- * The COSA (communication serial adapter?, the Czech word "kosa" means
- * "scythe") is a next-generation sync/async board with two interfaces
- * - currently any of V.24, X.21, V.35 and V.36 can be selected.
- * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel.
- * The 8-channels version is in development.
- *
- * Both types have downloadable firmware and communicate via ISA DMA.
- * COSA can be also a bus-mastering device.
- *
- * SOFTWARE INFO
- *
- * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/.
- * The CVS tree of Linux driver can be viewed there, as well as the
- * firmware binaries and user-space utilities for downloading the firmware
- * into the card and setting up the card.
- *
- * The Linux driver (unlike the present *BSD drivers :-) can work even
- * for the COSA and SRP in one computer and allows each channel to work
- * in one of the two modes (character or network device).
- *
- * AUTHOR
- *
- * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>.
- *
- * You can mail me bugfixes and even success reports. I am especially
- * interested in the SMP and/or muliti-channel success/failure reports
- * (I wonder if I did the locking properly :-).
- *
- * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER
- *
- * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek
- * The skeleton.c by Donald Becker
- * The SDL Riscom/N2 driver by Mike Natale
- * The Comtrol Hostess SV11 driver by Alan Cox
- * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-
-#undef COSA_SLOW_IO /* for testing purposes only */
-
-#include "cosa.h"
-
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Maximum length of the channel name */
-#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1)
-
-/* Per-channel data structure */
-
-struct channel_data {
- int usage; /* Usage count; >0 for chrdev, -1 for netdev */
- int num; /* Number of the channel */
- struct cosa_data *cosa; /* Pointer to the per-card structure */
- int txsize; /* Size of transmitted data */
- char *txbuf; /* Transmit buffer */
- char name[COSA_MAX_NAME]; /* channel name */
-
- /* The HW layer interface */
- /* routine called from the RX interrupt */
- char *(*setup_rx)(struct channel_data *channel, int size);
- /* routine called when the RX is done (from the EOT interrupt) */
- int (*rx_done)(struct channel_data *channel);
- /* routine called when the TX is done (from the EOT interrupt) */
- int (*tx_done)(struct channel_data *channel, int size);
-
- /* Character device parts */
- struct mutex rlock;
- struct semaphore wsem;
- char *rxdata;
- int rxsize;
- wait_queue_head_t txwaitq, rxwaitq;
- int tx_status, rx_status;
-
- /* generic HDLC device parts */
- struct net_device *netdev;
- struct sk_buff *rx_skb, *tx_skb;
-};
-
-/* cosa->firmware_status bits */
-#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? */
-#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */
-#define COSA_FW_START BIT(2) /* Is the microcode running? */
-
-struct cosa_data {
- int num; /* Card number */
- char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */
- unsigned int datareg, statusreg; /* I/O ports */
- unsigned short irq, dma; /* IRQ and DMA number */
- unsigned short startaddr; /* Firmware start address */
- unsigned short busmaster; /* Use busmastering? */
- int nchannels; /* # of channels on this card */
- int driver_status; /* For communicating with firmware */
- int firmware_status; /* Downloaded, reseted, etc. */
- unsigned long rxbitmap, txbitmap;/* Bitmap of channels who are willing to send/receive data */
- unsigned long rxtx; /* RX or TX in progress? */
- int enabled;
- int usage; /* usage count */
- int txchan, txsize, rxsize;
- struct channel_data *rxchan;
- char *bouncebuf;
- char *txbuf, *rxbuf;
- struct channel_data *chan;
- spinlock_t lock; /* For exclusive operations on this structure */
- char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */
- char *type; /* card type */
-};
-
-/* Define this if you want all the possible ports to be autoprobed.
- * It is here but it probably is not a good idea to use this.
- */
-/* #define COSA_ISA_AUTOPROBE 1*/
-
-/* Character device major number. 117 was allocated for us.
- * The value of 0 means to allocate a first free one.
- */
-static DEFINE_MUTEX(cosa_chardev_mutex);
-static int cosa_major = 117;
-
-/* Encoding of the minor numbers:
- * The lowest CARD_MINOR_BITS bits means the channel on the single card,
- * the highest bits means the card number.
- */
-#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
- * for the single card
- */
-/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
- * macro doesn't like anything other than the raw number as an argument :-(
- */
-#define MAX_CARDS 16
-/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */
-
-#define DRIVER_RX_READY 0x0001
-#define DRIVER_TX_READY 0x0002
-#define DRIVER_TXMAP_SHIFT 2
-#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
-
-/* for cosa->rxtx - indicates whether either transmit or receive is
- * in progress. These values are mean number of the bit.
- */
-#define TXBIT 0
-#define RXBIT 1
-#define IRQBIT 2
-
-#define COSA_MTU 2000 /* FIXME: I don't know this exactly */
-
-#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */
-#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
-#undef DEBUG_IO //1 /* Dump the I/O traffic */
-
-#define TX_TIMEOUT (5 * HZ)
-
-/* Maybe the following should be allocated dynamically */
-static struct cosa_data cosa_cards[MAX_CARDS];
-static int nr_cards;
-
-#ifdef COSA_ISA_AUTOPROBE
-static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,};
-/* NOTE: DMA is not autoprobed!!! */
-static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,};
-#else
-static int io[MAX_CARDS + 1];
-static int dma[MAX_CARDS + 1];
-#endif
-/* IRQ can be safely autoprobed */
-static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,};
-
-/* for class stuff*/
-static struct class *cosa_class;
-
-#ifdef MODULE
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards");
-module_param_hw_array(irq, int, irq, NULL, 0);
-MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards");
-module_param_hw_array(dma, int, dma, NULL, 0);
-MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards");
-
-MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>");
-MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card");
-MODULE_LICENSE("GPL");
-#endif
-
-/* I use this mainly for testing purposes */
-#ifdef COSA_SLOW_IO
-#define cosa_outb outb_p
-#define cosa_outw outw_p
-#define cosa_inb inb_p
-#define cosa_inw inw_p
-#else
-#define cosa_outb outb
-#define cosa_outw outw
-#define cosa_inb inb
-#define cosa_inw inw
-#endif
-
-#define is_8bit(cosa) (!((cosa)->datareg & 0x08))
-
-#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg))
-#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg))
-#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg))
-#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg))
-#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg))
-#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg))
-
-/* Initialization stuff */
-static int cosa_probe(int ioaddr, int irq, int dma);
-
-/* HW interface */
-static void cosa_enable_rx(struct channel_data *chan);
-static void cosa_disable_rx(struct channel_data *chan);
-static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
-static void cosa_kick(struct cosa_data *cosa);
-static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
-
-/* Network device stuff */
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity);
-static int cosa_net_open(struct net_device *d);
-static int cosa_net_close(struct net_device *d);
-static void cosa_net_timeout(struct net_device *d, unsigned int txqueue);
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
-static char *cosa_net_setup_rx(struct channel_data *channel, int size);
-static int cosa_net_rx_done(struct channel_data *channel);
-static int cosa_net_tx_done(struct channel_data *channel, int size);
-
-/* Character device */
-static char *chrdev_setup_rx(struct channel_data *channel, int size);
-static int chrdev_rx_done(struct channel_data *channel);
-static int chrdev_tx_done(struct channel_data *channel, int size);
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos);
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static unsigned int cosa_poll(struct file *file, poll_table *poll);
-static int cosa_open(struct inode *inode, struct file *file);
-static int cosa_release(struct inode *inode, struct file *file);
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-#ifdef COSA_FASYNC_WORKING
-static int cosa_fasync(struct inode *inode, struct file *file, int on);
-#endif
-
-static const struct file_operations cosa_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = cosa_read,
- .write = cosa_write,
- .poll = cosa_poll,
- .unlocked_ioctl = cosa_chardev_ioctl,
- .open = cosa_open,
- .release = cosa_release,
-#ifdef COSA_FASYNC_WORKING
- .fasync = cosa_fasync,
-#endif
-};
-
-/* Ioctls */
-static int cosa_start(struct cosa_data *cosa, int address);
-static int cosa_reset(struct cosa_data *cosa);
-static int cosa_download(struct cosa_data *cosa, void __user *a);
-static int cosa_readmem(struct cosa_data *cosa, void __user *a);
-
-/* COSA/SRP ROM monitor */
-static int download(struct cosa_data *cosa, const char __user *data, int addr, int len);
-static int startmicrocode(struct cosa_data *cosa, int address);
-static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len);
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
-
-/* Auxiliary functions */
-static int get_wait_data(struct cosa_data *cosa);
-static int put_wait_data(struct cosa_data *cosa, int data);
-static int puthexnumber(struct cosa_data *cosa, int number);
-static void put_driver_status(struct cosa_data *cosa);
-static void put_driver_status_nolock(struct cosa_data *cosa);
-
-/* Interrupt handling */
-static irqreturn_t cosa_interrupt(int irq, void *cosa);
-
-/* I/O ops debugging */
-#ifdef DEBUG_IO
-static void debug_data_in(struct cosa_data *cosa, int data);
-static void debug_data_out(struct cosa_data *cosa, int data);
-static void debug_data_cmd(struct cosa_data *cosa, int data);
-static void debug_status_in(struct cosa_data *cosa, int status);
-static void debug_status_out(struct cosa_data *cosa, int status);
-#endif
-
-static inline struct channel_data *dev_to_chan(struct net_device *dev)
-{
- return (struct channel_data *)dev_to_hdlc(dev)->priv;
-}
-
-/* ---------- Initialization stuff ---------- */
-
-static int __init cosa_init(void)
-{
- int i, err = 0;
-
- if (cosa_major > 0) {
- if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
- pr_warn("unable to get major %d\n", cosa_major);
- err = -EIO;
- goto out;
- }
- } else {
- cosa_major = register_chrdev(0, "cosa", &cosa_fops);
- if (cosa_major < 0) {
- pr_warn("unable to register chardev\n");
- err = -EIO;
- goto out;
- }
- }
- for (i = 0; i < MAX_CARDS; i++)
- cosa_cards[i].num = -1;
-	for (i = 0; i < MAX_CARDS && io[i] != 0; i++)
- cosa_probe(io[i], irq[i], dma[i]);
- if (!nr_cards) {
- pr_warn("no devices found\n");
- unregister_chrdev(cosa_major, "cosa");
- err = -ENODEV;
- goto out;
- }
- cosa_class = class_create(THIS_MODULE, "cosa");
- if (IS_ERR(cosa_class)) {
- err = PTR_ERR(cosa_class);
- goto out_chrdev;
- }
- for (i = 0; i < nr_cards; i++)
- device_create(cosa_class, NULL, MKDEV(cosa_major, i), NULL,
- "cosa%d", i);
- err = 0;
- goto out;
-
-out_chrdev:
- unregister_chrdev(cosa_major, "cosa");
-out:
- return err;
-}
-module_init(cosa_init);
-
-static void __exit cosa_exit(void)
-{
- struct cosa_data *cosa;
- int i;
-
- for (i = 0; i < nr_cards; i++)
- device_destroy(cosa_class, MKDEV(cosa_major, i));
- class_destroy(cosa_class);
-
- for (cosa = cosa_cards; nr_cards--; cosa++) {
- /* Clean up the per-channel data */
- for (i = 0; i < cosa->nchannels; i++) {
- /* Chardev driver has no alloc'd per-channel data */
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- /* Clean up the per-card data */
- kfree(cosa->chan);
- kfree(cosa->bouncebuf);
- free_irq(cosa->irq, cosa);
- free_dma(cosa->dma);
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- }
- unregister_chrdev(cosa_major, "cosa");
-}
-module_exit(cosa_exit);
-
-static const struct net_device_ops cosa_ops = {
- .ndo_open = cosa_net_open,
- .ndo_stop = cosa_net_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
- .ndo_tx_timeout = cosa_net_timeout,
-};
-
-static int cosa_probe(int base, int irq, int dma)
-{
- struct cosa_data *cosa = cosa_cards + nr_cards;
- int i, err = 0;
-
- memset(cosa, 0, sizeof(struct cosa_data));
-
- /* Checking validity of parameters: */
- /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
- if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
- pr_info("invalid IRQ %d\n", irq);
- return -1;
- }
- /* I/O address should be between 0x100 and 0x3ff and should be
- * multiple of 8.
- */
- if (base < 0x100 || base > 0x3ff || base & 0x7) {
- pr_info("invalid I/O address 0x%x\n", base);
- return -1;
- }
- /* DMA should be 0,1 or 3-7 */
- if (dma < 0 || dma == 4 || dma > 7) {
- pr_info("invalid DMA %d\n", dma);
- return -1;
- }
- /* and finally, on 16-bit COSA DMA should be 4-7 and
- * I/O base should not be multiple of 0x10
- */
- if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
- pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
- base, dma);
- return -1;
- }
-
- cosa->dma = dma;
- cosa->datareg = base;
- cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2;
- spin_lock_init(&cosa->lock);
-
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa"))
- return -1;
-
- if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
- printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
- err = -1;
- goto err_out;
- }
-
- /* Test the validity of identification string */
- if (!strncmp(cosa->id_string, "SRP", 3)) {
- cosa->type = "srp";
- } else if (!strncmp(cosa->id_string, "COSA", 4)) {
- cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16";
- } else {
-/* Print a warning only if we are not autoprobing */
-#ifndef COSA_ISA_AUTOPROBE
- pr_info("valid signature not found at 0x%x\n", base);
-#endif
- err = -1;
- goto err_out;
- }
- /* Update the name of the region now we know the type of card */
- release_region(base, is_8bit(cosa) ? 2 : 4);
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, cosa->type)) {
- printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
- return -1;
- }
-
- /* Now do IRQ autoprobe */
- if (irq < 0) {
- unsigned long irqs;
-/* pr_info("IRQ autoprobe\n"); */
- irqs = probe_irq_on();
- /* Enable interrupt on tx buffer empty (it sure is)
- * really sure ?
- * FIXME: When this code is not used as module, we should
- * probably call udelay() instead of the interruptible sleep.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- schedule_timeout(msecs_to_jiffies(300));
- irq = probe_irq_off(irqs);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
- /* Empty the received data register */
- cosa_getdata8(cosa);
-
- if (irq < 0) {
- pr_info("multiple interrupts obtained (%d, board at 0x%x)\n",
- irq, cosa->datareg);
- err = -1;
- goto err_out;
- }
- if (irq == 0) {
- pr_info("no interrupt obtained (board at 0x%x)\n",
- cosa->datareg);
- /* return -1; */
- }
- }
-
- cosa->irq = irq;
- cosa->num = nr_cards;
- cosa->usage = 0;
- cosa->nchannels = 2; /* FIXME: how to determine this? */
-
- if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
- err = -1;
- goto err_out;
- }
- if (request_dma(cosa->dma, cosa->type)) {
- err = -1;
- goto err_out1;
- }
-
- cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA);
- if (!cosa->bouncebuf) {
- err = -ENOMEM;
- goto err_out2;
- }
- sprintf(cosa->name, "cosa%d", cosa->num);
-
- /* Initialize the per-channel data */
- cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
- if (!cosa->chan) {
- err = -ENOMEM;
- goto err_out3;
- }
-
- for (i = 0; i < cosa->nchannels; i++) {
- struct channel_data *chan = &cosa->chan[i];
-
- chan->cosa = cosa;
- chan->num = i;
- sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
-
- /* Initialize the chardev data structures */
- mutex_init(&chan->rlock);
- sema_init(&chan->wsem, 1);
-
- /* Register the network interface */
- chan->netdev = alloc_hdlcdev(chan);
- if (!chan->netdev) {
- pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
- err = -ENOMEM;
- goto err_hdlcdev;
- }
- dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
- dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
- chan->netdev->netdev_ops = &cosa_ops;
- chan->netdev->watchdog_timeo = TX_TIMEOUT;
- chan->netdev->base_addr = chan->cosa->datareg;
- chan->netdev->irq = chan->cosa->irq;
- chan->netdev->dma = chan->cosa->dma;
- err = register_hdlc_device(chan->netdev);
- if (err) {
- netdev_warn(chan->netdev,
- "register_hdlc_device() failed\n");
- free_netdev(chan->netdev);
- goto err_hdlcdev;
- }
- }
-
- pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
- cosa->num, cosa->id_string, cosa->type,
- cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
-
- return nr_cards++;
-
-err_hdlcdev:
- while (i-- > 0) {
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- kfree(cosa->chan);
-err_out3:
- kfree(cosa->bouncebuf);
-err_out2:
- free_dma(cosa->dma);
-err_out1:
- free_irq(cosa->irq, cosa);
-err_out:
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- pr_notice("cosa%d: allocating resources failed\n", cosa->num);
- return err;
-}
-
-/*---------- network device ---------- */
-
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static int cosa_net_open(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- int err;
- unsigned long flags;
-
- if (!(chan->cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- chan->cosa->name, chan->cosa->firmware_status);
- return -EPERM;
- }
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->usage != 0) {
- pr_warn("%s: cosa_net_open called with usage count %d\n",
- chan->name, chan->usage);
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return -EBUSY;
- }
- chan->setup_rx = cosa_net_setup_rx;
- chan->tx_done = cosa_net_tx_done;
- chan->rx_done = cosa_net_rx_done;
- chan->usage = -1;
- chan->cosa->usage++;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
-
- err = hdlc_open(dev);
- if (err) {
- spin_lock_irqsave(&chan->cosa->lock, flags);
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return err;
- }
-
- netif_start_queue(dev);
- cosa_enable_rx(chan);
- return 0;
-}
-
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- netif_stop_queue(dev);
-
- chan->tx_skb = skb;
- cosa_start_tx(chan, skb->data, skb->len);
- return NETDEV_TX_OK;
-}
-
-static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- if (test_bit(RXBIT, &chan->cosa->rxtx)) {
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_missed_errors++;
- } else {
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- }
- cosa_kick(chan->cosa);
- if (chan->tx_skb) {
- dev_kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- netif_wake_queue(dev);
-}
-
-static int cosa_net_close(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- unsigned long flags;
-
- netif_stop_queue(dev);
- hdlc_close(dev);
- cosa_disable_rx(chan);
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->rx_skb) {
- kfree_skb(chan->rx_skb);
- chan->rx_skb = NULL;
- }
- if (chan->tx_skb) {
- kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return 0;
-}
-
-static char *cosa_net_setup_rx(struct channel_data *chan, int size)
-{
- /* We can safely fall back to non-dma-able memory, because we have
- * the cosa->bouncebuf pre-allocated.
- */
- kfree_skb(chan->rx_skb);
- chan->rx_skb = dev_alloc_skb(size);
- if (!chan->rx_skb) {
- pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
- chan->netdev->stats.rx_dropped++;
- return NULL;
- }
- netif_trans_update(chan->netdev);
- return skb_put(chan->rx_skb, size);
-}
-
-static int cosa_net_rx_done(struct channel_data *chan)
-{
- if (!chan->rx_skb) {
- pr_warn("%s: rx_done with empty skb!\n", chan->name);
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_frame_errors++;
- return 0;
- }
- chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
- chan->rx_skb->dev = chan->netdev;
- skb_reset_mac_header(chan->rx_skb);
- chan->netdev->stats.rx_packets++;
- chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
- netif_rx(chan->rx_skb);
- chan->rx_skb = NULL;
- return 0;
-}
-
-/* ARGSUSED */
-static int cosa_net_tx_done(struct channel_data *chan, int size)
-{
- if (!chan->tx_skb) {
- pr_warn("%s: tx_done with empty skb!\n", chan->name);
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- return 1;
- }
- dev_consume_skb_irq(chan->tx_skb);
- chan->tx_skb = NULL;
- chan->netdev->stats.tx_packets++;
- chan->netdev->stats.tx_bytes += size;
- netif_wake_queue(chan->netdev);
- return 1;
-}
-
-/*---------- Character device ---------- */
-
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (mutex_lock_interruptible(&chan->rlock))
- return -ERESTARTSYS;
-
- chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL);
- if (!chan->rxdata) {
- mutex_unlock(&chan->rlock);
- return -ENOMEM;
- }
-
- chan->rx_status = 0;
- cosa_enable_rx(chan);
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->rxwaitq, &wait);
- while (!chan->rx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->rx_status == 0) {
- chan->rx_status = 1;
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- kbuf = chan->rxdata;
- count = chan->rxsize;
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
-
- if (copy_to_user(buf, kbuf, count)) {
- kfree(kbuf);
- return -EFAULT;
- }
- kfree(kbuf);
- return count;
-}
-
-static char *chrdev_setup_rx(struct channel_data *chan, int size)
-{
- /* Expect size <= COSA_MTU */
- chan->rxsize = size;
- return chan->rxdata;
-}
-
-static int chrdev_rx_done(struct channel_data *chan)
-{
- if (chan->rx_status) { /* Reader has died */
- kfree(chan->rxdata);
- up(&chan->wsem);
- }
- chan->rx_status = 1;
- wake_up_interruptible(&chan->rxwaitq);
- return 1;
-}
-
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (down_interruptible(&chan->wsem))
- return -ERESTARTSYS;
-
- if (count > COSA_MTU)
- count = COSA_MTU;
-
- /* Allocate the buffer */
- kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA);
- if (!kbuf) {
- up(&chan->wsem);
- return -ENOMEM;
- }
- if (copy_from_user(kbuf, buf, count)) {
- up(&chan->wsem);
- kfree(kbuf);
- return -EFAULT;
- }
- chan->tx_status = 0;
- cosa_start_tx(chan, kbuf, count);
-
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->txwaitq, &wait);
- while (!chan->tx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->tx_status == 0) {
-			chan->tx_status = 1;
-			remove_wait_queue(&chan->txwaitq, &wait);
-			__set_current_state(TASK_RUNNING);
- spin_unlock_irqrestore(&cosa->lock, flags);
- up(&chan->wsem);
- kfree(kbuf);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->txwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- up(&chan->wsem);
- spin_unlock_irqrestore(&cosa->lock, flags);
- kfree(kbuf);
- return count;
-}
-
-static int chrdev_tx_done(struct channel_data *chan, int size)
-{
- if (chan->tx_status) { /* Writer was interrupted */
- kfree(chan->txbuf);
- up(&chan->wsem);
- }
- chan->tx_status = 1;
- wake_up_interruptible(&chan->txwaitq);
- return 1;
-}
-
-static __poll_t cosa_poll(struct file *file, poll_table *poll)
-{
- pr_info("cosa_poll is here\n");
- return 0;
-}
-
-static int cosa_open(struct inode *inode, struct file *file)
-{
- struct cosa_data *cosa;
- struct channel_data *chan;
- unsigned long flags;
- int n;
- int ret = 0;
-
- mutex_lock(&cosa_chardev_mutex);
- n = iminor(file_inode(file)) >> CARD_MINOR_BITS;
- if (n >= nr_cards) {
- ret = -ENODEV;
- goto out;
- }
- cosa = cosa_cards + n;
-
- n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1);
- if (n >= cosa->nchannels) {
- ret = -ENODEV;
- goto out;
- }
- chan = cosa->chan + n;
-
- file->private_data = chan;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- if (chan->usage < 0) { /* in netdev mode */
- spin_unlock_irqrestore(&cosa->lock, flags);
- ret = -EBUSY;
- goto out;
- }
- cosa->usage++;
- chan->usage++;
-
- chan->tx_done = chrdev_tx_done;
- chan->setup_rx = chrdev_setup_rx;
- chan->rx_done = chrdev_rx_done;
- spin_unlock_irqrestore(&cosa->lock, flags);
-out:
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
-
-static int cosa_release(struct inode *inode, struct file *file)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- unsigned long flags;
-
- cosa = channel->cosa;
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->usage--;
- channel->usage--;
- spin_unlock_irqrestore(&cosa->lock, flags);
- return 0;
-}
-
-#ifdef COSA_FASYNC_WORKING
-static struct fasync_struct *fasync[256] = { NULL, };
-
-/* To be done ... */
-static int cosa_fasync(struct inode *inode, struct file *file, int on)
-{
- int port = iminor(inode);
-
- return fasync_helper(inode, file, on, &fasync[port]);
-}
-#endif
-
-/* ---------- Ioctls ---------- */
-
-/* Ioctl subroutines can safely be made inline, because they are called
- * only from cosa_ioctl().
- */
-static inline int cosa_reset(struct cosa_data *cosa)
-{
- char idstring[COSA_MAX_ID_STRING];
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START);
- if (cosa_reset_and_read_id(cosa, idstring) < 0) {
- pr_notice("cosa%d: reset failed\n", cosa->num);
- return -EIO;
- }
- pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to download data into COSA memory. Calls download() */
-static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->name, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
- if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD);
-
- i = download(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: microcode download failed: %d\n",
- cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD;
- return 0;
-}
-
-/* High-level function to read COSA memory. Calls readmem() */
-static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~COSA_FW_RESET;
-
- i = readmem(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to start microcode. Calls startmicrocode(). */
-static inline int cosa_start(struct cosa_data *cosa, int address)
-{
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
-
- if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD))
- != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) {
- pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- cosa->firmware_status &= ~COSA_FW_RESET;
- i = startmicrocode(cosa, address);
- if (i < 0) {
- pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
- cosa->num, address, i);
- return -EIO;
- }
- pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address);
- cosa->startaddr = address;
- cosa->firmware_status |= COSA_FW_START;
- return 0;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->id_string) + 1;
-
- if (copy_to_user(string, cosa->id_string, l))
- return -EFAULT;
- return l;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->type) + 1;
-
- if (copy_to_user(string, cosa->type, l))
- return -EFAULT;
- return l;
-}
-
-static int cosa_ioctl_common(struct cosa_data *cosa,
- struct channel_data *channel, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case COSAIORSET: /* Reset the device */
- if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return cosa_reset(cosa);
- case COSAIOSTRT: /* Start the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_start(cosa, arg);
- case COSAIODOWNLD: /* Download the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
-
- return cosa_download(cosa, argp);
- case COSAIORMEM:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_readmem(cosa, argp);
- case COSAIORTYPE:
- return cosa_gettype(cosa, argp);
- case COSAIORIDSTR:
- return cosa_getidstr(cosa, argp);
- case COSAIONRCARDS:
- return nr_cards;
- case COSAIONRCHANS:
- return cosa->nchannels;
- case COSAIOBMSET:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- if (is_8bit(cosa))
- return -EINVAL;
- if (arg != COSA_BM_OFF && arg != COSA_BM_ON)
- return -EINVAL;
- cosa->busmaster = arg;
- return 0;
- case COSAIOBMGET:
- return cosa->busmaster;
- }
- return -ENOIOCTLCMD;
-}
-
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- long ret;
-
- mutex_lock(&cosa_chardev_mutex);
- cosa = channel->cosa;
- ret = cosa_ioctl_common(cosa, channel, cmd, arg);
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
-
-/*---------- HW layer interface ---------- */
-
-/* The higher layer can bind itself to the HW layer by setting the callbacks
- * in the channel_data structure and by using these routines.
- */
-static void cosa_enable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (!test_and_set_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-static void cosa_disable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (test_and_clear_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-/* FIXME: This routine probably should check for cosa_start_tx() called when
- * the previous transmit is still unfinished. In this case the non-zero
- * return value should indicate to the caller that queuing up
- * the transmit has failed.
- */
-static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
-{
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
-#ifdef DEBUG_DATA
- int i;
-
- pr_info("cosa%dc%d: starting tx(0x%x)",
- chan->cosa->num, chan->num, len);
- for (i = 0; i < len; i++)
- pr_cont(" %02x", buf[i]&0xff);
- pr_cont("\n");
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- chan->txbuf = buf;
- chan->txsize = len;
- if (len > COSA_MTU)
- chan->txsize = COSA_MTU;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- /* Tell the firmware we are ready */
- set_bit(chan->num, &cosa->txbitmap);
- put_driver_status(cosa);
-
- return 0;
-}
-
-static void put_driver_status(struct cosa_data *cosa)
-{
- unsigned long flags;
- int status;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
- if (!cosa->rxtx) {
- if (cosa->rxbitmap | cosa->txbitmap) {
- if (!cosa->enabled) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- }
- } else if (cosa->enabled) {
- cosa->enabled = 0;
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
- }
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static void put_driver_status_nolock(struct cosa_data *cosa)
-{
- int status;
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
-
- if (cosa->rxbitmap | cosa->txbitmap) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- } else {
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- cosa->enabled = 0;
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
-}
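
The status byte computed above packs an RX-ready flag, a TX-ready flag and an inverted per-channel TX map into one value written to the card. A minimal standalone sketch of that encoding follows; the DRIVER_* values below are assumptions for illustration only, since they are defined in a part of cosa.c that is outside this hunk:

	/* Standalone sketch; DRIVER_* values are assumed, not taken from cosa.c. */
	#include <stdio.h>

	#define DRIVER_RX_READY    0x01
	#define DRIVER_TX_READY    0x02
	#define DRIVER_TXMAP_SHIFT 2
	#define DRIVER_TXMAP_MASK  0x0c	/* assumes two channels */

	static int driver_status(unsigned long rxbitmap, unsigned long txbitmap)
	{
		return (rxbitmap ? DRIVER_RX_READY : 0)
			| (txbitmap ? DRIVER_TX_READY : 0)
			| (txbitmap ? ~(txbitmap << DRIVER_TXMAP_SHIFT)
					& DRIVER_TXMAP_MASK : 0);
	}

	int main(void)
	{
		/* Channel 0 has a pending TX, nothing is receiving: prints 0x0a. */
		printf("status = 0x%02x\n", driver_status(0, 0x1));
		return 0;
	}
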
-
-/* The "kickme" function: When the DMA times out, this is called to
- * clean up the driver status.
- * FIXME: Preliminary support, the interface is probably wrong.
- */
-static void cosa_kick(struct cosa_data *cosa)
-{
- unsigned long flags, flags1;
- char *s = "(probably) IRQ";
-
- if (test_bit(RXBIT, &cosa->rxtx))
- s = "RX DMA";
- if (test_bit(TXBIT, &cosa->rxtx))
- s = "TX DMA";
-
- pr_info("%s: %s timeout - restarting\n", cosa->name, s);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->rxtx = 0;
-
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
-
- /* FIXME: Anything else? */
- udelay(100);
- cosa_putstatus(cosa, 0);
- udelay(100);
- (void)cosa_getdata8(cosa);
- udelay(100);
- cosa_putdata8(cosa, 0);
- udelay(100);
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-/* Check if the whole buffer is DMA-able: it must lie below 16 MB of
- * physical memory and must not span a 64 KB boundary. For now it seems
- * SKBs never violate this, but we check anyway.
- */
-static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
-{
- static int count;
- unsigned long b = (unsigned long)buf;
-
- if (b + len >= MAX_DMA_ADDRESS)
- return 0;
- if ((b ^ (b + len)) & 0x10000) {
- if (count++ < 5)
- pr_info("%s: packet spanning a 64k boundary\n",
- chan->name);
- return 0;
- }
- return 1;
-}
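
The two ISA DMA constraints checked by cosa_dma_able() - the buffer must lie below the 16 MB ISA DMA limit and must not cross a 64 KB boundary of the DMA controller - can be exercised with a small standalone sketch; the 16 MB constant here merely stands in for the kernel's MAX_DMA_ADDRESS:

	#include <stdio.h>

	#define ISA_DMA_LIMIT 0x1000000UL	/* 16 MB, stand-in for MAX_DMA_ADDRESS */

	static int dma_able(unsigned long buf, unsigned long len)
	{
		if (buf + len >= ISA_DMA_LIMIT)
			return 0;			/* out of ISA DMA reach */
		if ((buf ^ (buf + len)) & 0x10000)
			return 0;			/* crosses a 64 KB boundary */
		return 1;
	}

	int main(void)
	{
		printf("%d\n", dma_able(0xfff0, 0x100));	/* 0: spans 0x10000 */
		printf("%d\n", dma_able(0x20000, 0x100));	/* 1: stays in one 64 KB page */
		return 0;
	}
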
-
-/* ---------- The SRP/COSA ROM monitor functions ---------- */
-
-/* Downloading SRP microcode: say "w" to the SRP monitor; it answers with "w=".
- * The driver then writes a 4-digit hex number (the start address of the
- * microcode) followed by a single space. The monitor replies with " =".
- * The driver writes another 4-digit hex number (the address of the last byte)
- * followed by a single space, and the monitor replies with a space. Now the
- * download begins. After the download the monitor replies with "\r\n." (CR LF dot).
- */
-static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address)
-{
- int i;
-
- if (put_wait_data(cosa, 'w') == -1)
- return -1;
-	i = get_wait_data(cosa);
-	if (i != 'w') {
-		printk("dnld: 0x%04x\n", i);
-		return -2;
-	}
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -10;
- if (get_wait_data(cosa) != ' ')
- return -11;
- if (get_wait_data(cosa) != '=')
- return -12;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -13;
- if (put_wait_data(cosa, ' ') == -1)
- return -18;
- if (get_wait_data(cosa) != ' ')
- return -19;
-
- while (length--) {
- char c;
-#ifndef SRP_DOWNLOAD_AT_BOOT
- if (get_user(c, microcode))
- return -23; /* ??? */
-#else
- c = *microcode;
-#endif
- if (put_wait_data(cosa, c) == -1)
- return -20;
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
-#endif
- return 0;
-}
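
As a concrete illustration of the chat above, the bytes the driver emits when downloading 16 bytes to address 0x0400 are sketched below; the address and length are example values only, the monitor's expected replies are noted in the comments, and the "%04X" formatting matches puthexnumber() further down:

	#include <stdio.h>

	int main(void)
	{
		int addr = 0x0400, len = 16;		/* example values only */

		printf("w");				/* monitor replies "w=" */
		printf("%04X ", addr);			/* monitor replies " =" */
		printf("%04X ", addr + len - 1);	/* monitor replies " " */
		/* ...the len data bytes follow; the monitor ends with "\r\n." */
		printf("\n");
		return 0;
	}
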
-
-/* Starting microcode is done via the "g" command of the SRP monitor.
- * The chat should be the following: "g" "g=" "<addr><CR>"
- * "<CR><CR><LF><CR><LF>".
- */
-static int startmicrocode(struct cosa_data *cosa, int address)
-{
- if (put_wait_data(cosa, 'g') == -1)
- return -1;
- if (get_wait_data(cosa) != 'g')
- return -2;
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, '\r') == -1)
- return -5;
-
- if (get_wait_data(cosa) != '\r')
- return -6;
- if (get_wait_data(cosa) != '\r')
- return -7;
- if (get_wait_data(cosa) != '\n')
- return -8;
- if (get_wait_data(cosa) != '\r')
- return -9;
- if (get_wait_data(cosa) != '\n')
- return -10;
-#if 0
- printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
-#endif
- return 0;
-}
-
-/* Reading memory is done via the "r" command of the SRP monitor.
- * The chat is the following "r" "r=" "<addr> " " =" "<last_byte> " " "
- * Then the driver can read the data, and the conversation is finished
- * by the SRP monitor sending "<CR><LF>." (dot at the end).
- *
- * This routine is not needed during the normal operation and serves
- * for debugging purposes only.
- */
-static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
-{
- if (put_wait_data(cosa, 'r') == -1)
- return -1;
- if ((get_wait_data(cosa)) != 'r')
- return -2;
- if ((get_wait_data(cosa)) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -5;
- if (get_wait_data(cosa) != ' ')
- return -6;
- if (get_wait_data(cosa) != '=')
- return -7;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -8;
- if (put_wait_data(cosa, ' ') == -1)
- return -9;
- if (get_wait_data(cosa) != ' ')
- return -10;
-
- while (length--) {
- char c;
- int i;
-
- i = get_wait_data(cosa);
- if (i == -1) {
- pr_info("0x%04x bytes remaining\n", length);
- return -11;
- }
- c = i;
-#if 1
- if (put_user(c, microcode))
- return -23; /* ??? */
-#else
- *microcode = c;
-#endif
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
-#endif
- return 0;
-}
-
-/* This function resets the device and reads the initial prompt
- * of the device's ROM monitor.
- */
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
-{
- int i = 0, id = 0, prev = 0, curr = 0;
-
- /* Reset the card ... */
- cosa_putstatus(cosa, 0);
- cosa_getdata8(cosa);
- cosa_putstatus(cosa, SR_RST);
- msleep(500);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
-
- /* Try to read the ID string. The card then prints out the
- * identification string ended by the "\n\x2e".
- *
- * The following loop is indexed through i (instead of id)
- * to avoid looping forever when for any reason
- * the port returns '\r', '\n' or '\x2e' permanently.
- */
- for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) {
- curr = get_wait_data(cosa);
- if (curr == -1)
- return -1;
-
- curr &= 0xff;
- if (curr != '\r' && curr != '\n' && curr != 0x2e)
- idstring[id++] = curr;
- if (curr == 0x2e && prev == '\n')
- break;
- }
- /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */
- idstring[id] = '\0';
- return id;
-}
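
The filtering loop above (it strips CR, LF and '.' characters and stops at the "\n." terminator) can be tried in isolation on a canned banner. COSA_MAX_ID_STRING is taken from cosa.h (128); the sample input is made up and merely stands in for whatever the ROM monitor actually prints:

	#include <stdio.h>

	#define COSA_MAX_ID_STRING 128

	int main(void)
	{
		const char *input = "COSA ver 2\r\n.";	/* made-up banner */
		char idstring[COSA_MAX_ID_STRING];
		int i, id = 0, prev = 0, curr = 0;

		for (i = 0; input[i] && i < COSA_MAX_ID_STRING - 1; i++, prev = curr) {
			curr = input[i] & 0xff;
			if (curr != '\r' && curr != '\n' && curr != 0x2e)
				idstring[id++] = curr;
			if (curr == 0x2e && prev == '\n')
				break;
		}
		idstring[id] = '\0';
		printf("id = \"%s\"\n", idstring);	/* prints: id = "COSA ver 2" */
		return 0;
	}
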
-
-/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
-
-/* This routine gets the data byte from the card waiting for the SR_RX_RDY
- * bit to be set in a loop. It should be used in the exceptional cases
- * only (for example when resetting the card or downloading the firmware).
- */
-static int get_wait_data(struct cosa_data *cosa)
-{
- int retries = 1000;
-
- while (--retries) {
- /* read data and return them */
- if (cosa_getstatus(cosa) & SR_RX_RDY) {
- short r;
-
- r = cosa_getdata8(cosa);
-#if 0
- pr_info("get_wait_data returning after %d retries\n",
- 999 - retries);
-#endif
- return r;
- }
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
- }
- pr_info("timeout in get_wait_data (status 0x%x)\n",
- cosa_getstatus(cosa));
- return -1;
-}
-
-/* This routine puts the data byte to the card waiting for the SR_TX_RDY
- * bit to be set in a loop. It should be used in the exceptional cases
- * only (for example when resetting the card or downloading the firmware).
- */
-static int put_wait_data(struct cosa_data *cosa, int data)
-{
- int retries = 1000;
-
- while (--retries) {
- /* read data and return them */
- if (cosa_getstatus(cosa) & SR_TX_RDY) {
- cosa_putdata8(cosa, data);
-#if 0
- pr_info("Putdata: %d retries\n", 999 - retries);
-#endif
- return 0;
- }
-#if 0
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
-#endif
- }
- pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n",
- cosa->num, cosa_getstatus(cosa));
- return -1;
-}
-
-/* The following routine puts the hexadecimal number into the SRP monitor
- * and verifies the proper echo of the sent bytes. Returns 0 on success,
- * a negative number on failure: -1, -3, -5, -7 mean that put_wait_data() failed,
- * -2, -4, -6, -8 mean that reading the echo failed.
- */
-static int puthexnumber(struct cosa_data *cosa, int number)
-{
- char temp[5];
- int i;
-
- /* Well, I should probably replace this by something faster. */
- sprintf(temp, "%04X", number);
- for (i = 0; i < 4; i++) {
- if (put_wait_data(cosa, temp[i]) == -1) {
- pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
- cosa->num, i);
- return -1 - 2 * i;
- }
- if (get_wait_data(cosa) != temp[i]) {
- pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n",
- cosa->num, i);
- return -2 - 2 * i;
- }
- }
- return 0;
-}
-
-/* ---------- Interrupt routines ---------- */
-
-/* There are three types of interrupt:
- * at the beginning of transmit - handled in tx_interrupt(),
- * at the beginning of receive - handled in rx_interrupt(), and
- * at the end of transmit/receive - handled in eot_interrupt().
- * These functions are multiplexed by cosa_interrupt() according to the
- * COSA status byte. I have moved the rx/tx/eot interrupt handling into
- * separate functions to make it more readable. These functions are inline,
- * so there should be no overhead of function call.
- *
- * In the COSA bus-master mode, we need to tell the card the address of a
- * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
- * It's time to use the bottom half :-(
- */
-
-/* Transmit interrupt routine - called when COSA is willing to obtain
- * data from the OS. The trickiest part of the routine is selecting the
- * channel we (the OS) want to send a packet on. For SRP we should probably
- * use a round-robin approach. The newer COSA firmwares have simple
- * flow control: bits 2 and 3 of the status word set to 1 mean that
- * channel 0 or 1 does not want to receive data.
- *
- * It seems there is a bug in COSA firmware (need to trace it further):
- * When the driver status says that the kernel has no more data for transmit
- * (e.g. at the end of TX DMA) and then the kernel changes its mind
- * (e.g. new packet is queued to hard_start_xmit()), the card issues
- * the TX interrupt but does not mark the channel as ready-to-transmit.
- * The fix seems to be to push the packet to COSA despite its request.
- * We first try to obey the card's opinion, and then fall back to forced TX.
- */
-static inline void tx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status);
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(TXBIT, &cosa->rxtx);
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- /* flow control, see the comment above */
- int i = 0;
-
- if (!cosa->txbitmap) {
- pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
- cosa->name);
- put_driver_status_nolock(cosa);
- clear_bit(TXBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- }
- while (1) {
- cosa->txchan++;
- i++;
- if (cosa->txchan >= cosa->nchannels)
- cosa->txchan = 0;
- if (!(cosa->txbitmap & (1 << cosa->txchan)))
- continue;
- if (~status &
- (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT)))
- break;
- /* in second pass, accept first ready-to-TX channel */
- if (i > cosa->nchannels) {
- /* Can be safely ignored */
-#ifdef DEBUG_IRQS
- printk(KERN_DEBUG "%s: Forcing TX "
- "to not-ready channel %d\n",
- cosa->name, cosa->txchan);
-#endif
- break;
- }
- }
-
- cosa->txsize = cosa->chan[cosa->txchan].txsize;
- if (cosa_dma_able(cosa->chan + cosa->txchan,
- cosa->chan[cosa->txchan].txbuf,
- cosa->txsize)) {
- cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
- } else {
- memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
- cosa->txsize);
- cosa->txbuf = cosa->bouncebuf;
- }
- }
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
- debug_data_in(cosa, cosa_getdata8(cosa));
-#else
- cosa_getdata8(cosa);
-#endif
- set_bit(IRQBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa_putstatus(cosa, 0);
- cosa_putdata8(cosa, cosa->txsize & 0xff);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
- debug_data_out(cosa, cosa->txsize & 0xff);
-#endif
- }
- } else {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000)
- | (cosa->txsize & 0x1fff));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) |
- (cosa->txsize & 0x1fff));
- debug_data_in(cosa, cosa_getdata8(cosa));
- debug_status_out(cosa, 0);
-#else
- cosa_getdata8(cosa);
-#endif
- cosa_putstatus(cosa, 0);
- }
-
- if (cosa->busmaster) {
- unsigned long addr = virt_to_bus(cosa->txbuf);
- int count = 0;
-
- pr_info("busmaster IRQ\n");
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- udelay(10);
- if (count > 1000)
- break;
- }
- pr_info("status %x\n", cosa_getstatus(cosa));
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, (addr >> 16) & 0xffff);
-
- count = 0;
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- if (count > 1000)
- break;
- udelay(10);
- }
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, addr & 0xffff);
- flags1 = claim_dma_lock();
- set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- } else {
- /* start the DMA */
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_WRITE);
- set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf));
- set_dma_count(cosa->dma, cosa->txsize);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- }
- cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static inline void rx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num);
-#endif
-
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(RXBIT, &cosa->rxtx);
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- set_bit(IRQBIT, &cosa->rxtx);
- put_driver_status_nolock(cosa);
- cosa->rxsize = cosa_getdata8(cosa) << 8;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize >> 8);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa->rxsize |= cosa_getdata8(cosa) & 0xff;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize & 0xff);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- } else {
- cosa->rxsize = cosa_getdata16(cosa);
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
- pr_warn("%s: rx for unknown channel (0x%04x)\n",
- cosa->name, cosa->rxsize);
- spin_unlock_irqrestore(&cosa->lock, flags);
- goto reject;
- }
- cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13);
- cosa->rxsize &= 0x1fff;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- cosa->rxbuf = NULL;
- if (cosa->rxchan->setup_rx)
- cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize);
-
- if (!cosa->rxbuf) {
-reject: /* Reject the packet */
- pr_info("cosa%d: rejecting packet on channel %d\n",
- cosa->num, cosa->rxchan->num);
- cosa->rxbuf = cosa->bouncebuf;
- }
-
- /* start the DMA */
- flags = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_READ);
- if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff))
- set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
- else
- set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
-
- set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff));
- enable_dma(cosa->dma);
- release_dma_lock(flags);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- cosa_putdata8(cosa, DRIVER_RX_READY);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- debug_data_cmd(cosa, DRIVER_RX_READY);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static inline void eot_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-
- spin_lock_irqsave(&cosa->lock, flags);
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
- if (test_bit(TXBIT, &cosa->rxtx)) {
- struct channel_data *chan = cosa->chan + cosa->txchan;
-
- if (chan->tx_done)
- if (chan->tx_done(chan, cosa->txsize))
- clear_bit(chan->num, &cosa->txbitmap);
- } else if (test_bit(RXBIT, &cosa->rxtx)) {
-#ifdef DEBUG_DATA
- {
- int i;
-
- pr_info("cosa%dc%d: done rx(0x%x)",
- cosa->num, cosa->rxchan->num, cosa->rxsize);
- for (i = 0; i < cosa->rxsize; i++)
- pr_cont(" %02x", cosa->rxbuf[i]&0xff);
- pr_cont("\n");
- }
-#endif
- /* Packet for unknown channel? */
- if (cosa->rxbuf == cosa->bouncebuf)
- goto out;
- if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize))
- memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize);
- if (cosa->rxchan->rx_done)
- if (cosa->rxchan->rx_done(cosa->rxchan))
- clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
- } else {
- pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
- }
-	/* Clear the RXBIT, TXBIT and IRQBIT (the last one should be
- * cleared anyway). We should do it as soon as possible
- * so that we can tell the COSA we are done and to give it a time
- * for recovery.
- */
-out:
- cosa->rxtx = 0;
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static irqreturn_t cosa_interrupt(int irq, void *cosa_)
-{
- unsigned status;
- int count = 0;
- struct cosa_data *cosa = cosa_;
-again:
- status = cosa_getstatus(cosa);
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff);
-#endif
-#ifdef DEBUG_IO
- debug_status_in(cosa, status);
-#endif
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_DOWN_REQUEST:
- tx_interrupt(cosa, status);
- break;
- case SR_UP_REQUEST:
- rx_interrupt(cosa, status);
- break;
- case SR_END_OF_TRANSFER:
- eot_interrupt(cosa, status);
- break;
- default:
- /* We may be too fast for SRP. Try to wait a bit more. */
- if (count++ < 100) {
- udelay(100);
- goto again;
- }
- pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
- cosa->num, status & 0xff, count);
- }
-#ifdef DEBUG_IRQS
- if (count)
- pr_info("%s: %d-times got unknown status in IRQ\n",
- cosa->name, count);
- else
- pr_info("%s: returning from IRQ\n", cosa->name);
-#endif
- return IRQ_HANDLED;
-}
-
-/* ---------- I/O debugging routines ---------- */
-/* These routines can be used to monitor COSA/SRP I/O and to printk()
- * the data being transferred on the data and status I/O port in a
- * readable way.
- */
-
-#ifdef DEBUG_IO
-static void debug_status_in(struct cosa_data *cosa, int status)
-{
- char *s;
-
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_UP_REQUEST:
- s = "RX_REQ";
- break;
- case SR_DOWN_REQUEST:
- s = "TX_REQ";
- break;
- case SR_END_OF_TRANSFER:
- s = "ET_REQ";
- break;
- default:
- s = "NO_REQ";
- break;
- }
- pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_USR_RQ ? "USR_RQ|" : "",
- status & SR_TX_RDY ? "TX_RDY|" : "",
- status & SR_RX_RDY ? "RX_RDY|" : "",
- s);
-}
-
-static void debug_status_out(struct cosa_data *cosa, int status)
-{
- pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_RX_DMA_ENA ? "RXDMA|" : "!rxdma|",
- status & SR_TX_DMA_ENA ? "TXDMA|" : "!txdma|",
- status & SR_RST ? "RESET|" : "",
- status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|",
- status & SR_TX_INT_ENA ? "TXINT|" : "!txint|",
- status & SR_RX_INT_ENA ? "RXINT" : "!rxint");
-}
-
-static void debug_data_in(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_out(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_cmd(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x (%s|%s)\n",
- cosa->name, data,
- data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
- data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
-}
-#endif
-
-/* EOF -- this file has not been truncated */
diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h
deleted file mode 100644
index f57e0af9d56a..000000000000
--- a/drivers/net/wan/cosa.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */
-
-/*
- * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- */
-
-#ifndef COSA_H__
-#define COSA_H__
-
-#include <linux/ioctl.h>
-
-#ifdef __KERNEL__
-/* status register - output bits */
-#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */
-#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */
-#define SR_RST 0x10 /* SRP reset */
-#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */
-#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */
-#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */
-
-/* status register - input bits */
-#define SR_USR_RQ 0x20 /* user interrupt request pending */
-#define SR_TX_RDY 0x40 /* transmitter empty (ready) */
-#define SR_RX_RDY 0x80 /* receiver data ready */
-
-#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data
- up to PC */
-#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down
- from PC to SRP */
-#define SR_END_OF_TRANSFER	0x03	/* SRP signals end of
- transfer (up or down) */
-
-#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */
-
-/* bits in driver status byte definitions : */
-#define SR_RDY_RCV 0x01 /* ready to receive packet */
-#define SR_RDY_SND 0x02 /* ready to send packet */
-#define SR_CMD_PND 0x04 /* command pending */ /* not currently used */
-
-/* ???? */
-#define SR_PKT_UP 0x01 /* transfer of packet up in progress */
-#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */
-
-#endif /* __KERNEL__ */
-
-#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */
-#define SR_START_ADDR 0x4400 /* SRP microcode start address */
-
-#define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */
-#define COSA_MAX_FIRMWARE_SIZE 0x10000
-
-/* ioctls */
-struct cosa_download {
- int addr, len;
- char __user *code;
-};
-
-/* Reset the device */
-#define COSAIORSET _IO('C',0xf0)
-
-/* Start microcode at given address */
-#define COSAIOSTRT _IOW('C',0xf1, int)
-
-/* Read the block from the device memory */
-#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *)
- /* actually the struct cosa_download itself; this is to keep
-	 * the ioctl number the same as in 2.4 in order to keep the user-space
- * utils compatible. */
-
-/* Write the block to the device memory (i.e. download the microcode) */
-#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *)
- /* actually the struct cosa_download itself; this is to keep
-	 * the ioctl number the same as in 2.4 in order to keep the user-space
- * utils compatible. */
-
-/* Read the device type (one of "srp", "cosa8", and "cosa16" for now) */
-#define COSAIORTYPE _IOR('C',0xf3, char *)
-
-/* Read the device identification string */
-#define COSAIORIDSTR _IOR('C',0xf4, char *)
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Increment/decrement the module usage count :-) */
-/* #define COSAIOMINC _IO('C',0xf5) */
-/* #define COSAIOMDEC _IO('C',0xf6) */
-
-/* Get the total number of cards installed */
-#define COSAIONRCARDS _IO('C',0xf7)
-
-/* Get the number of channels on this card */
-#define COSAIONRCHANS _IO('C',0xf8)
-
-/* Set the driver for the bus-master operations */
-#define COSAIOBMSET _IOW('C', 0xf9, unsigned short)
-
-#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */
-#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */
-
-/* Gets the busmaster status */
-#define COSAIOBMGET _IO('C', 0xfa)
-
-#endif /* !COSA_H__ */
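
Tying the header above to the ioctl handlers in cosa.c: the driver insists on reset, then download, then start (see the firmware_status checks in cosa_reset(), cosa_download() and cosa_start()). A hedged userspace sketch of that sequence follows; the device node name and the firmware file name are assumptions, while the ioctl numbers, the structure layout and COSA_LOAD_ADDR are copied from the header:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	struct cosa_download {			/* mirrors the definition above */
		int addr, len;
		char *code;
	};
	#define COSAIORSET	_IO('C', 0xf0)
	#define COSAIOSTRT	_IOW('C', 0xf1, int)
	#define COSAIODOWNLD	_IOW('C', 0xf2, struct cosa_download *)
	#define COSA_LOAD_ADDR	0x400

	int main(void)
	{
		static char code[0x10000];		/* COSA_MAX_FIRMWARE_SIZE */
		struct cosa_download dl;
		int fd, fw, len;

		fd = open("/dev/cosa0", O_RDWR);	/* channel 0 of card 0 */
		fw = open("cosa.bin", O_RDONLY);	/* assumed microcode image name */
		if (fd < 0 || fw < 0) {
			perror("open");
			return 1;
		}
		len = read(fw, code, sizeof(code));

		if (ioctl(fd, COSAIORSET) < 0)		/* 1. reset the card */
			perror("COSAIORSET");

		dl.addr = COSA_LOAD_ADDR;
		dl.len = len;
		dl.code = code;
		if (ioctl(fd, COSAIODOWNLD, &dl) < 0)	/* 2. download the microcode */
			perror("COSAIODOWNLD");

		/* 3. start it; the driver takes the start address as the argument value */
		if (ioctl(fd, COSAIOSTRT, COSA_LOAD_ADDR) < 0)
			perror("COSAIOSTRT");

		close(fw);
		close(fd);
		return 0;
	}
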
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
deleted file mode 100644
index e985e54ba75d..000000000000
--- a/drivers/net/wan/hostess_sv11.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Comtrol SV11 card driver
- *
- * This is a slightly odd Z85230 synchronous driver. All you need to
- * know basically is
- *
- * It's a genuine Z85230
- *
- * It supports DMA using two DMA channels in SYNC mode. The driver doesn't
- * use these facilities
- *
- * The control port is at io+1, the data at io+3 and turning off the DMA
- * is done by writing 0 to io+4
- *
- * The hardware does the bus handling to avoid the need for delays between
- * touching control registers.
- *
- * Port B isn't wired (why - beats me)
- *
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-static int dma;
-
-/* Network driver support routines
- */
-
-static inline struct z8530_dev *dev_to_sv(struct net_device *dev)
-{
- return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- /* Send it to the PPP layer. We don't have time to process
- * it right now.
- */
- netif_rx(skb);
-}
-
-/* We've been placed in the UP state
- */
-
-static int hostess_open(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- int err = -1;
-
- /* Link layer up
- */
- switch (dma) {
- case 0:
- err = z8530_sync_open(d, &sv11->chanA);
- break;
- case 1:
- err = z8530_sync_dma_open(d, &sv11->chanA);
- break;
- case 2:
- err = z8530_sync_txdma_open(d, &sv11->chanA);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return err;
- }
- sv11->chanA.rx_function = hostess_input;
-
- /*
- * Go go go
- */
-
- netif_start_queue(d);
- return 0;
-}
-
-static int hostess_close(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- /* Discard new frames
- */
- sv11->chanA.rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind.
- */
-
-static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
-}
-
-static int hostess_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-/* Description block for a Comtrol Hostess SV11 card
- */
-
-static const struct net_device_ops hostess_ops = {
- .ndo_open = hostess_open,
- .ndo_stop = hostess_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static struct z8530_dev *sv11_init(int iobase, int irq)
-{
- struct z8530_dev *sv;
- struct net_device *netdev;
- /* Get the needed I/O space
- */
-
- if (!request_region(iobase, 8, "Comtrol SV11")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
- if (!sv)
- goto err_kzalloc;
-
- /* Stuff in the I/O addressing
- */
-
- sv->active = 0;
-
- sv->chanA.ctrlio = iobase + 1;
- sv->chanA.dataio = iobase + 3;
- sv->chanB.ctrlio = -1;
- sv->chanB.dataio = -1;
- sv->chanA.irqs = &z8530_nop;
- sv->chanB.irqs = &z8530_nop;
-
- outb(0, iobase + 4); /* DMA off */
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "Hostess SV11", sv) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_irq;
- }
-
- sv->irq = irq;
- sv->chanA.private = sv;
- sv->chanA.dev = sv;
- sv->chanB.dev = sv;
-
- if (dma) {
-		/* You can have DMA off, or DMA channels 1 and 3; that's the lot
-		 * on the Comtrol.
- */
- sv->chanA.txdma = 3;
- sv->chanA.rxdma = 1;
- outb(0x03 | 0x08, iobase + 4); /* DMA on */
- if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
- goto err_txdma;
-
- if (dma == 1)
- if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
- goto err_rxdma;
- }
-
-	/* Kill our private IRQ line; the Hostess can end up chattering
- * until the configuration is set
- */
- disable_irq(irq);
-
- /* Begin normal initialise
- */
-
- if (z8530_init(sv)) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_dma;
- }
- z8530_channel_load(&sv->chanB, z8530_dead_port);
- if (sv->type == Z85C30)
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
- else
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
-
- enable_irq(irq);
-
- /* Now we can take the IRQ
- */
-
- sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
- if (!netdev)
- goto free_dma;
-
- dev_to_hdlc(netdev)->attach = hostess_attach;
- dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
- netdev->netdev_ops = &hostess_ops;
- netdev->base_addr = iobase;
- netdev->irq = irq;
-
- if (register_hdlc_device(netdev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(netdev);
- goto free_dma;
- }
-
- z8530_describe(sv, "I/O", iobase);
- sv->active = 1;
- return sv;
-
-free_dma:
- if (dma == 1)
- free_dma(sv->chanA.rxdma);
-err_rxdma:
- if (dma)
- free_dma(sv->chanA.txdma);
-err_txdma:
- free_irq(irq, sv);
-err_irq:
- kfree(sv);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void sv11_shutdown(struct z8530_dev *dev)
-{
- unregister_hdlc_device(dev->chanA.netdevice);
- z8530_shutdown(dev);
- free_irq(dev->irq, dev);
- if (dma) {
- if (dma == 1)
- free_dma(dev->chanA.rxdma);
- free_dma(dev->chanA.txdma);
- }
- release_region(dev->chanA.ctrlio - 1, 8);
- free_netdev(dev->chanA.netdevice);
- kfree(dev);
-}
-
-static int io = 0x200;
-static int irq = 9;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
-
-static struct z8530_dev *sv11_unit;
-
-static int sv11_module_init(void)
-{
- sv11_unit = sv11_init(io, irq);
- if (!sv11_unit)
- return -ENODEV;
- return 0;
-}
-module_init(sv11_module_init);
-
-static void sv11_module_cleanup(void)
-{
- if (sv11_unit)
- sv11_shutdown(sv11_unit);
-}
-module_exit(sv11_module_cleanup);
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
deleted file mode 100644
index f00fe4491d69..000000000000
--- a/drivers/net/wan/lmc/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Lan Media 21140 based WAN cards
-# Specifically the 1000,1200,5200,5245
-#
-
-obj-$(CONFIG_LANMEDIA) += lmc.o
-
-lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
-
-# Like above except every packet gets echoed to KERN_DEBUG
-# in hex
-#
-# DBGDEF = \
-# -DDEBUG \
-# -DLMC_PACKET_LOG
-
-ccflags-y := $(DBGDEF)
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
deleted file mode 100644
index d7d59b4595f9..000000000000
--- a/drivers/net/wan/lmc/lmc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_H_
-#define _LMC_H_
-
-#include "lmc_var.h"
-
-/*
- * prototypes for everyone
- */
-int lmc_probe(struct net_device * dev);
-unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
- devaddr, unsigned regno);
-void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
- unsigned regno, unsigned data);
-void lmc_led_on(lmc_softc_t * const, u32);
-void lmc_led_off(lmc_softc_t * const, u32);
-unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
-void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
-
-int lmc_ioctl(struct net_device *dev, struct if_settings *ifs);
-
-extern lmc_media_t lmc_ds3_media;
-extern lmc_media_t lmc_ssi_media;
-extern lmc_media_t lmc_t1_media;
-extern lmc_media_t lmc_hssi_media;
-
-#ifdef _DBG_EVENTLOG
-static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-#endif
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
deleted file mode 100644
index 2b6051bda3fb..000000000000
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-
-#include "lmc_debug.h"
-
-/*
- * Prints out len, max to 80 octets using printk, 20 per line
- */
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
-{
- int iNewLine = 1;
- char str[80], *pstr;
-
- sprintf(str, KERN_DEBUG "lmc: %s: ", type);
- pstr = str+strlen(str);
-
- if(iLen > 240){
- printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
- iLen = 240;
- }
- else{
- printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
- }
-
- while(iLen > 0)
- {
- sprintf(pstr, "%02x ", *ucData);
- pstr+=3;
- ucData++;
- if( !(iNewLine % 20))
- {
- sprintf(pstr, "\n");
- printk(str);
- sprintf(str, KERN_DEBUG "lmc: %s: ", type);
- pstr=str+strlen(str);
- }
- iNewLine++;
- iLen--;
- }
- sprintf(pstr, "\n");
- printk(str);
-}
-#endif
-#endif
-
-#ifdef DEBUG
-u32 lmcEventLogIndex;
-u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
-{
- lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
- lmcEventLogBuf[lmcEventLogIndex++] = arg2;
- lmcEventLogBuf[lmcEventLogIndex++] = arg3;
- lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
-
- lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
-}
-#endif /* DEBUG */
-
-/* --------------------------- end if_lmc_linux.c ------------------------ */
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
deleted file mode 100644
index cfae9eddf003..000000000000
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_DEBUG_H_
-#define _LMC_DEBUG_H_
-
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z))
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-
-
-
-/* Debug --- Event log definitions --- */
-/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */
-#define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */
-#define LMC_EVENTLOGARGS 4 /* number of args for each event */
-
-/* event indicators */
-#define LMC_EVENT_XMT 1
-#define LMC_EVENT_XMTEND 2
-#define LMC_EVENT_XMTINT 3
-#define LMC_EVENT_RCVINT 4
-#define LMC_EVENT_RCVEND 5
-#define LMC_EVENT_INT 6
-#define LMC_EVENT_XMTINTTMO 7
-#define LMC_EVENT_XMTPRCTMO 8
-#define LMC_EVENT_INTEND 9
-#define LMC_EVENT_RESET1 10
-#define LMC_EVENT_RESET2 11
-#define LMC_EVENT_FORCEDRESET 12
-#define LMC_EVENT_WATCHDOG 13
-#define LMC_EVENT_BADPKTSURGE 14
-#define LMC_EVENT_TBUSY0 15
-#define LMC_EVENT_TBUSY1 16
-
-
-#ifdef DEBUG
-extern u32 lmcEventLogIndex;
-extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
-#else
-#define LMC_EVENT_LOG(x,y,z)
-#endif /* end ifdef _DBG_EVENTLOG */
-
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
deleted file mode 100644
index 8c65e2176e94..000000000000
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_IOCTL_H_
-#define _LMC_IOCTL_H_
-/* $Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $ */
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-#define LMCIOCGINFO SIOCDEVPRIVATE+3 /* get current state */
-#define LMCIOCSINFO SIOCDEVPRIVATE+4 /* set state to user values */
-#define LMCIOCGETLMCSTATS SIOCDEVPRIVATE+5
-#define LMCIOCCLEARLMCSTATS SIOCDEVPRIVATE+6
-#define LMCIOCDUMPEVENTLOG SIOCDEVPRIVATE+7
-#define LMCIOCGETXINFO SIOCDEVPRIVATE+8
-#define LMCIOCSETCIRCUIT SIOCDEVPRIVATE+9
-#define LMCIOCUNUSEDATM SIOCDEVPRIVATE+10
-#define LMCIOCRESET SIOCDEVPRIVATE+11
-#define LMCIOCT1CONTROL SIOCDEVPRIVATE+12
-#define LMCIOCIFTYPE SIOCDEVPRIVATE+13
-#define LMCIOCXILINX SIOCDEVPRIVATE+14
-
-#define LMC_CARDTYPE_UNKNOWN -1
-#define LMC_CARDTYPE_HSSI 1 /* probed card is a HSSI card */
-#define LMC_CARDTYPE_DS3 2 /* probed card is a DS3 card */
-#define LMC_CARDTYPE_SSI 3 /* probed card is a SSI card */
-#define LMC_CARDTYPE_T1 4 /* probed card is a T1 card */
-
-#define LMC_CTL_CARDTYPE_LMC5200 0 /* HSSI */
-#define LMC_CTL_CARDTYPE_LMC5245 1 /* DS3 */
-#define LMC_CTL_CARDTYPE_LMC1000 2 /* SSI, V.35 */
-#define LMC_CTL_CARDTYPE_LMC1200 3 /* DS1 */
-
-#define LMC_CTL_OFF 0 /* generic OFF value */
-#define LMC_CTL_ON 1 /* generic ON value */
-
-#define LMC_CTL_CLOCK_SOURCE_EXT 0 /* clock off line */
-#define LMC_CTL_CLOCK_SOURCE_INT 1 /* internal clock */
-
-#define LMC_CTL_CRC_LENGTH_16 16
-#define LMC_CTL_CRC_LENGTH_32 32
-#define LMC_CTL_CRC_BYTESIZE_2 2
-#define LMC_CTL_CRC_BYTESIZE_4 4
-
-
-#define LMC_CTL_CABLE_LENGTH_LT_100FT 0 /* DS3 cable < 100 feet */
-#define LMC_CTL_CABLE_LENGTH_GT_100FT 1 /* DS3 cable >= 100 feet */
-
-#define LMC_CTL_CIRCUIT_TYPE_E1 0
-#define LMC_CTL_CIRCUIT_TYPE_T1 1
-
-/*
- * IFTYPE defines
- */
-#define LMC_PPP 1 /* use generic HDLC interface */
-#define LMC_NET 2 /* use direct net interface */
-#define LMC_RAW 3 /* use direct net interface */
-
-/*
- * These are not in the least IOCTL related, but I want them common.
- */
-/*
- * assignments for the GPIO register on the DEC chip (common)
- */
-#define LMC_GEP_INIT 0x01 /* 0: */
-#define LMC_GEP_RESET 0x02 /* 1: */
-#define LMC_GEP_MODE 0x10 /* 4: */
-#define LMC_GEP_DP 0x20 /* 5: */
-#define LMC_GEP_DATA 0x40 /* 6: serial out */
-#define LMC_GEP_CLK 0x80 /* 7: serial clock */
-
-/*
- * HSSI GPIO assignments
- */
-#define LMC_GEP_HSSI_ST 0x04 /* 2: receive timing sense (deprecated) */
-#define LMC_GEP_HSSI_CLOCK 0x08 /* 3: clock source */
-
-/*
- * T1 GPIO assignments
- */
-#define LMC_GEP_SSI_GENERATOR 0x04 /* 2: enable prog freq gen serial i/f */
-#define LMC_GEP_SSI_TXCLOCK 0x08 /* 3: provide clock on TXCLOCK output */
-
-/*
- * Common MII16 bits
- */
-#define LMC_MII16_LED0 0x0080
-#define LMC_MII16_LED1 0x0100
-#define LMC_MII16_LED2 0x0200
-#define LMC_MII16_LED3 0x0400 /* Error, and the red one */
-#define LMC_MII16_LED_ALL 0x0780 /* LED bit mask */
-#define LMC_MII16_FIFO_RESET 0x0800
-
-/*
- * definitions for HSSI
- */
-#define LMC_MII16_HSSI_TA 0x0001
-#define LMC_MII16_HSSI_CA 0x0002
-#define LMC_MII16_HSSI_LA 0x0004
-#define LMC_MII16_HSSI_LB 0x0008
-#define LMC_MII16_HSSI_LC 0x0010
-#define LMC_MII16_HSSI_TM 0x0020
-#define LMC_MII16_HSSI_CRC 0x0040
-
-/*
- * assignments for the MII register 16 (DS3)
- */
-#define LMC_MII16_DS3_ZERO 0x0001
-#define LMC_MII16_DS3_TRLBK 0x0002
-#define LMC_MII16_DS3_LNLBK 0x0004
-#define LMC_MII16_DS3_RAIS 0x0008
-#define LMC_MII16_DS3_TAIS 0x0010
-#define LMC_MII16_DS3_BIST 0x0020
-#define LMC_MII16_DS3_DLOS 0x0040
-#define LMC_MII16_DS3_CRC 0x1000
-#define LMC_MII16_DS3_SCRAM 0x2000
-#define LMC_MII16_DS3_SCRAM_LARS 0x4000
-
-/* Note: 2 pairs of LEDs were swapped by mistake
- * in Xilinx code for DS3 & DS1 adapters */
-#define LMC_DS3_LED0 0x0100 /* bit 08 yellow */
-#define LMC_DS3_LED1 0x0080 /* bit 07 blue */
-#define LMC_DS3_LED2 0x0400 /* bit 10 green */
-#define LMC_DS3_LED3 0x0200 /* bit 09 red */
-
-/*
- * framer register 0 and 7 (7 is latched and reset on read)
- */
-#define LMC_FRAMER_REG0_DLOS 0x80 /* digital loss of service */
-#define LMC_FRAMER_REG0_OOFS 0x40 /* out of frame sync */
-#define LMC_FRAMER_REG0_AIS 0x20 /* alarm indication signal */
-#define LMC_FRAMER_REG0_CIS 0x10 /* channel idle */
-#define LMC_FRAMER_REG0_LOC 0x08 /* loss of clock */
-
-/*
- * Framer register 9 contains the blue alarm signal
- */
-#define LMC_FRAMER_REG9_RBLUE 0x02 /* Blue alarm failure */
-
-/*
- * Framer register 0x10 contains xbit error
- */
-#define LMC_FRAMER_REG10_XBIT 0x01 /* X bit error alarm failure */
-
-/*
- * And SSI, LMC1000
- */
-#define LMC_MII16_SSI_DTR 0x0001 /* DTR output RW */
-#define LMC_MII16_SSI_DSR 0x0002 /* DSR input RO */
-#define LMC_MII16_SSI_RTS 0x0004 /* RTS output RW */
-#define LMC_MII16_SSI_CTS 0x0008 /* CTS input RO */
-#define LMC_MII16_SSI_DCD 0x0010 /* DCD input RO */
-#define LMC_MII16_SSI_RI 0x0020 /* RI input RO */
-#define LMC_MII16_SSI_CRC 0x1000 /* CRC select - RW */
-
-/*
- * bits 0x0080 through 0x0800 are generic, and described
- * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET
- */
-#define LMC_MII16_SSI_LL 0x1000 /* LL output RW */
-#define LMC_MII16_SSI_RL 0x2000 /* RL output RW */
-#define LMC_MII16_SSI_TM 0x4000 /* TM input RO */
-#define LMC_MII16_SSI_LOOP 0x8000 /* loopback enable RW */
-
-/*
- * Some of the MII16 bits are mirrored in the MII17 register as well,
- * but let's keep thing separate for now, and get only the cable from
- * the MII17.
- */
-#define LMC_MII17_SSI_CABLE_MASK 0x0038 /* mask to extract the cable type */
-#define LMC_MII17_SSI_CABLE_SHIFT 3 /* shift to extract the cable type */
-
-/*
- * And T1, LMC1200
- */
-#define LMC_MII16_T1_UNUSED1 0x0003
-#define LMC_MII16_T1_XOE 0x0004
-#define LMC_MII16_T1_RST 0x0008 /* T1 chip reset - RW */
-#define LMC_MII16_T1_Z 0x0010 /* output impedance T1=1, E1=0 output - RW */
-#define LMC_MII16_T1_INTR 0x0020 /* interrupt from 8370 - RO */
-#define LMC_MII16_T1_ONESEC 0x0040 /* one second square wave - ro */
-
-#define LMC_MII16_T1_LED0 0x0100
-#define LMC_MII16_T1_LED1 0x0080
-#define LMC_MII16_T1_LED2 0x0400
-#define LMC_MII16_T1_LED3 0x0200
-#define LMC_MII16_T1_FIFO_RESET 0x0800
-
-#define LMC_MII16_T1_CRC 0x1000 /* CRC select - RW */
-#define LMC_MII16_T1_UNUSED2 0xe000
-
-
-/* 8370 framer registers */
-
-#define T1FRAMER_ALARM1_STATUS 0x47
-#define T1FRAMER_ALARM2_STATUS 0x48
-#define T1FRAMER_FERR_LSB 0x50
-#define T1FRAMER_FERR_MSB 0x51 /* framing bit error counter */
-#define T1FRAMER_LCV_LSB 0x54
-#define T1FRAMER_LCV_MSB 0x55 /* line code violation counter */
-#define T1FRAMER_AERR 0x5A
-
-/* mask for the above AERR register */
-#define T1FRAMER_LOF_MASK (0x0f0) /* receive loss of frame */
-#define T1FRAMER_COFA_MASK (0x0c0) /* change of frame alignment */
-#define T1FRAMER_SEF_MASK (0x03) /* severely errored frame */
-
-/* 8370 framer register ALM1 (0x47) values
- * used to determine link status
- */
-
-#define T1F_SIGFRZ 0x01 /* signaling freeze */
-#define T1F_RLOF 0x02 /* receive loss of frame alignment */
-#define T1F_RLOS 0x04 /* receive loss of signal */
-#define T1F_RALOS 0x08 /* receive analog loss of signal or RCKI loss of clock */
-#define T1F_RAIS 0x10 /* receive alarm indication signal */
-#define T1F_UNUSED 0x20
-#define T1F_RYEL 0x40 /* receive yellow alarm */
-#define T1F_RMYEL 0x80 /* receive multiframe yellow alarm */
-
-#define LMC_T1F_WRITE 0
-#define LMC_T1F_READ 1
-
-typedef struct lmc_st1f_control {
- int command;
- int address;
- int value;
- char __user *data;
-} lmc_t1f_control;
-
-enum lmc_xilinx_c {
- lmc_xilinx_reset = 1,
- lmc_xilinx_load_prom = 2,
- lmc_xilinx_load = 3
-};
-
-struct lmc_xilinx_control {
- enum lmc_xilinx_c command;
- int len;
- char __user *data;
-};
-
-/* ------------------ end T1 defs ------------------- */
-
-#define LMC_MII_LedMask 0x0780
-#define LMC_MII_LedBitPos 7
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
deleted file mode 100644
index 76c6b4f89890..000000000000
--- a/drivers/net/wan/lmc/lmc_main.c
+++ /dev/null
@@ -1,2009 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- *
- * With Help By:
- * David Boggs
- * Ron Crane
- * Alan Cox
- *
- * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
- *
- * To control link specific options lmcctl is required.
- * It can be obtained from ftp.lanmedia.com.
- *
- * Linux driver notes:
- * Linux uses the device struct lmc_private to pass private information
- * around.
- *
- * The initialization portion of this driver (the lmc_reset() and the
- * The initialization portion of this driver consists of the lmc_reset()
- * and lmc_dec_reset() functions, as well as the LED controls and the
- * lmc_initcsrs() function.
- * The watchdog function runs every second and checks to see if
- * we still have link, and that the timing source is what we expected
- * it to be. If link is lost, the interface is marked down, and
- * we no longer can transmit.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-#include <linux/jiffies.h>
-//#include <asm/spinlock.h>
-
-#define DRIVER_MAJOR_VERSION 1
-#define DRIVER_MINOR_VERSION 34
-#define DRIVER_SUB_VERSION 0
-
-#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-#include "lmc_proto.h"
-
-static int LMC_PKT_BUF_SZ = 1542;
-
-static const struct pci_device_id lmc_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_VENDOR_ID_LMC, PCI_ANY_ID },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_ANY_ID, PCI_VENDOR_ID_LMC },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
-MODULE_LICENSE("GPL v2");
-
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static int lmc_rx (struct net_device *dev);
-static int lmc_open(struct net_device *dev);
-static int lmc_close(struct net_device *dev);
-static struct net_device_stats *lmc_get_stats(struct net_device *dev);
-static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
-static void lmc_softreset(lmc_softc_t * const);
-static void lmc_running_reset(struct net_device *dev);
-static int lmc_ifdown(struct net_device * const);
-static void lmc_watchdog(struct timer_list *t);
-static void lmc_reset(lmc_softc_t * const sc);
-static void lmc_dec_reset(lmc_softc_t * const sc);
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
-
-/*
- * linux reserves 16 device specific IOCTLs. We call them
- * LMCIOC* to control various bits of our world.
- */
-static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- lmc_ctl_t ctl;
- int ret = -EOPNOTSUPP;
- u16 regVal;
- unsigned long flags;
-
- /*
- * Most functions mess with the structure
- * Disable interrupts while we do the polling
- */
-
- switch (cmd) {
- /*
- * Return current driver state. Since we keep this up
-         * to date internally, just copy this out to the user.
- */
- case LMCIOCGINFO: /*fold01*/
- if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t)))
- ret = -EFAULT;
- else
- ret = 0;
- break;
-
- case LMCIOCSINFO: /*fold01*/
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- if(dev->flags & IFF_UP){
- ret = -EBUSY;
- break;
- }
-
- if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
- ret = -EFAULT;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_media->set_status (sc, &ctl);
-
- if(ctl.crc_length != sc->ictl.crc_length) {
- sc->lmc_media->set_crc_length(sc, ctl.crc_length);
- if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
- sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
- else
- sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0;
- break;
-
- case LMCIOCIFTYPE: /*fold01*/
- {
- u16 old_type = sc->if_type;
- u16 new_type;
-
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- if (copy_from_user(&new_type, data, sizeof(u16))) {
- ret = -EFAULT;
- break;
- }
-
-
- if (new_type == old_type)
- {
- ret = 0 ;
- break; /* no change */
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_proto_close(sc);
-
- sc->if_type = new_type;
- lmc_proto_attach(sc);
- ret = lmc_proto_open(sc);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- break;
- }
-
- case LMCIOCGETXINFO: /*fold01*/
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
-
- sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
- sc->lmc_xinfo.PciSlotNumber = 0;
- sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
- sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
- sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
- sc->lmc_xinfo.XilinxRevisionNumber =
- lmc_mii_readreg (sc, 0, 3) & 0xf;
- sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
- sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
- sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
-
- if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo)))
- ret = -EFAULT;
- else
- ret = 0;
-
- break;
-
- case LMCIOCGETLMCSTATS:
- spin_lock_irqsave(&sc->lmc_lock, flags);
- if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
- sc->extra_stats.framingBitErrorCount +=
- lmc_mii_readreg(sc, 0, 18) & 0xff;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
- sc->extra_stats.framingBitErrorCount +=
- (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
- sc->extra_stats.lineCodeViolationCount +=
- lmc_mii_readreg(sc, 0, 18) & 0xff;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
- sc->extra_stats.lineCodeViolationCount +=
- (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
- regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
-
- sc->extra_stats.lossOfFrameCount +=
- (regVal & T1FRAMER_LOF_MASK) >> 4;
- sc->extra_stats.changeOfFrameAlignmentCount +=
- (regVal & T1FRAMER_COFA_MASK) >> 2;
- sc->extra_stats.severelyErroredFrameCount +=
- regVal & T1FRAMER_SEF_MASK;
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- if (copy_to_user(data, &sc->lmc_device->stats,
- sizeof(sc->lmc_device->stats)) ||
- copy_to_user(data + sizeof(sc->lmc_device->stats),
- &sc->extra_stats, sizeof(sc->extra_stats)))
- ret = -EFAULT;
- else
- ret = 0;
- break;
-
- case LMCIOCCLEARLMCSTATS:
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
- memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
- sc->extra_stats.check = STATCHECK;
- sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
- sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
- sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- ret = 0;
- break;
-
- case LMCIOCSETCIRCUIT: /*fold01*/
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- if(dev->flags & IFF_UP){
- ret = -EBUSY;
- break;
- }
-
- if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
- ret = -EFAULT;
- break;
- }
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
- sc->ictl.circuit_type = ctl.circuit_type;
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- ret = 0;
-
- break;
-
- case LMCIOCRESET: /*fold01*/
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- /* Reset driver and bring back to current state */
- printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
- lmc_running_reset (dev);
- printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
-
- LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0;
- break;
-
-#ifdef DEBUG
- case LMCIOCDUMPEVENTLOG:
- if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) {
- ret = -EFAULT;
- break;
- }
- if (copy_to_user(data + sizeof(u32), lmcEventLogBuf,
- sizeof(lmcEventLogBuf)))
- ret = -EFAULT;
- else
- ret = 0;
-
- break;
-#endif /* end ifdef _DBG_EVENTLOG */
- case LMCIOCT1CONTROL: /*fold01*/
- if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
- ret = -EOPNOTSUPP;
- break;
- }
- break;
- case LMCIOCXILINX: /*fold01*/
- {
- struct lmc_xilinx_control xc; /*fold02*/
-
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- /*
-             * Stop the transmitter while we restart the hardware
- */
- netif_stop_queue(dev);
-
- if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) {
- ret = -EFAULT;
- break;
- }
- switch(xc.command){
- case lmc_xilinx_reset: /*fold02*/
- {
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_mii_readreg (sc, 0, 16);
-
- /*
- * Make all of them 0 and make input
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * make the reset output
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
- /*
- * RESET low to force configuration. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
-
- sc->lmc_gpio &= ~LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- sc->lmc_gpio |= LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, 0xff);
-
-                    /* Reset the framer hardware */
- sc->lmc_media->set_link_status (sc, 1);
- sc->lmc_media->set_status (sc, NULL);
-// lmc_softreset(sc);
-
- {
- int i;
- for(i = 0; i < 5; i++){
- lmc_led_on(sc, LMC_DS3_LED0);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED0);
- lmc_led_on(sc, LMC_DS3_LED1);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED1);
- lmc_led_on(sc, LMC_DS3_LED3);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED3);
- lmc_led_on(sc, LMC_DS3_LED2);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED2);
- }
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-
-
- ret = 0x0;
-
- }
-
- break;
- case lmc_xilinx_load_prom: /*fold02*/
- {
- int timeout = 500000;
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_mii_readreg (sc, 0, 16);
-
- /*
- * Make all of them 0 and make input
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * make the reset output
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * RESET low to force configuration. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
-
- sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- /*
- * busy wait for the chip to reset
- */
- while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
- (timeout-- > 0))
- cpu_relax();
-
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, 0xff);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0x0;
-
-
- break;
-
- }
-
- case lmc_xilinx_load: /*fold02*/
- {
- char *data;
- int pos;
- int timeout = 500000;
-
- if (!xc.data) {
- ret = -EINVAL;
- break;
- }
-
- data = memdup_user(xc.data, xc.len);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- break;
- }
-
- printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
-                     * Clear the Xilinx and start programming from the DEC
- */
-
- /*
-                     * Set output as:
- * Reset: 0 (active)
- * DP: 0 (active)
- * Mode: 1
- *
- */
- sc->lmc_gpio = 0x00;
- sc->lmc_gpio &= ~LMC_GEP_DP;
- sc->lmc_gpio &= ~LMC_GEP_RESET;
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
-                     * Wait at least 10 us; 20 to be safe
- */
- udelay(50);
-
- /*
- * Clear reset and activate programming lines
- * Reset: Input
- * DP: Input
- * Clock: Output
- * Data: Output
- * Mode: Output
- */
- lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * Set LOAD, DATA, Clock to 1
- */
- sc->lmc_gpio = 0x00;
- sc->lmc_gpio |= LMC_GEP_MODE;
- sc->lmc_gpio |= LMC_GEP_DATA;
- sc->lmc_gpio |= LMC_GEP_CLK;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
-
- /*
- * busy wait for the chip to reset
- */
- while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
- (timeout-- > 0))
- cpu_relax();
-
- printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
-
- for(pos = 0; pos < xc.len; pos++){
- switch(data[pos]){
- case 0:
- sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
- break;
- case 1:
- sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
- break;
- default:
- printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
- sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
- }
- sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
- udelay(1);
-
-                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
- udelay(1);
- }
- if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
- printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
- }
- else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
- printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
- }
- else {
- printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
- }
-
- lmc_gpio_mkinput(sc, 0xff);
-
- sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- kfree(data);
-
- ret = 0;
-
- break;
- }
- default: /*fold02*/
- ret = -EBADE;
- break;
- }
-
- netif_wake_queue(dev);
- sc->lmc_txfull = 0;
-
- }
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-
-/* the watchdog process that cruises around */
-static void lmc_watchdog(struct timer_list *t) /*fold00*/
-{
- lmc_softc_t *sc = from_timer(sc, t, timer);
- struct net_device *dev = sc->lmc_device;
- int link_status;
- u32 ticks;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- if(sc->check != 0xBEAFCAFE){
- printk("LMC: Corrupt net_device struct, breaking out\n");
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- return;
- }
-
-
- /* Make sure the tx jabber and rx watchdog are off,
- * and the transmit and receive processes are running.
- */
-
- LMC_CSR_WRITE (sc, csr_15, 0x00000011);
- sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- if (sc->lmc_ok == 0)
- goto kick_timer;
-
- LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
-
- /* --- begin time out check -----------------------------------
- * check for a transmit interrupt timeout
- * Has the packet xmt vs xmt serviced threshold been exceeded */
- if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd == 0)
- {
-
- /* wait for the watchdog to come around again */
- sc->tx_TimeoutInd = 1;
- }
- else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd)
- {
-
- LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
-
- sc->tx_TimeoutDisplay = 1;
- sc->extra_stats.tx_TimeoutCnt++;
-
- /* DEC chip is stuck, hit it with a RESET!!!! */
- lmc_running_reset (dev);
-
-
- /* look at receive & transmit process state to make sure they are running */
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
-
- /* look at: DSR - 02 for Reg 16
- * CTS - 08
- * DCD - 10
- * RI - 20
- * for Reg 17
- */
- LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
-
- /* reset the transmit timeout detection flag */
- sc->tx_TimeoutInd = 0;
- sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
- } else {
- sc->tx_TimeoutInd = 0;
- sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
- }
-
- /* --- end time out check ----------------------------------- */
-
-
- link_status = sc->lmc_media->get_link_status (sc);
-
- /*
- * hardware level link lost, but the interface is marked as up.
- * Mark it as down.
- */
- if ((link_status == 0) && (sc->last_link_status != 0)) {
- printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
- sc->last_link_status = 0;
- /* lmc_reset (sc); Why reset??? The link can go down ok */
-
- /* Inform the world that link has been lost */
- netif_carrier_off(dev);
- }
-
- /*
- * hardware link is up, but the interface is marked as down.
- * Bring it back up again.
- */
- if (link_status != 0 && sc->last_link_status == 0) {
- printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
- sc->last_link_status = 1;
- /* lmc_reset (sc); Again why reset??? */
-
- netif_carrier_on(dev);
- }
-
- /* Call media specific watchdog functions */
- sc->lmc_media->watchdog(sc);
-
- /*
- * Poke the transmitter to make sure it
- * never stops, even if we run out of mem
- */
- LMC_CSR_WRITE(sc, csr_rxpoll, 0);
-
- /*
- * Check for code that failed
- * and try and fix it as appropriate
- */
- if(sc->failed_ring == 1){
- /*
-         * Failed to set up the recv/xmit ring
- * Try again
- */
- sc->failed_ring = 0;
- lmc_softreset(sc);
- }
- if(sc->failed_recv_alloc == 1){
- /*
- * We failed to alloc mem in the
- * interrupt handler, go through the rings
- * and rebuild them
- */
- sc->failed_recv_alloc = 0;
- lmc_softreset(sc);
- }
-
-
- /*
- * remember the timer value
- */
-kick_timer:
-
- ticks = LMC_CSR_READ (sc, csr_gp_timer);
- LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
- sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
- /*
- * restart this timer.
- */
- sc->timer.expires = jiffies + (HZ);
- add_timer (&sc->timer);
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
-
-static int lmc_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static const struct net_device_ops lmc_ops = {
- .ndo_open = lmc_open,
- .ndo_stop = lmc_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
- .ndo_siocdevprivate = lmc_siocdevprivate,
- .ndo_tx_timeout = lmc_driver_timeout,
- .ndo_get_stats = lmc_get_stats,
-};
-
-static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- lmc_softc_t *sc;
- struct net_device *dev;
- u16 subdevice;
- u16 AdapModelNum;
- int err;
- static int cards_found;
-
- err = pcim_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
- return err;
- }
-
- err = pci_request_regions(pdev, "lmc");
- if (err) {
- printk(KERN_ERR "lmc: pci_request_region failed\n");
- return err;
- }
-
- /*
- * Allocate our own device structure
- */
- sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
- if (!sc)
- return -ENOMEM;
-
- dev = alloc_hdlcdev(sc);
- if (!dev) {
- printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
- return -ENOMEM;
- }
-
-
- dev->type = ARPHRD_HDLC;
- dev_to_hdlc(dev)->xmit = lmc_start_xmit;
- dev_to_hdlc(dev)->attach = lmc_attach;
- dev->netdev_ops = &lmc_ops;
- dev->watchdog_timeo = HZ; /* 1 second */
- dev->tx_queue_len = 100;
- sc->lmc_device = dev;
- sc->name = dev->name;
- sc->if_type = LMC_PPP;
- sc->check = 0xBEAFCAFE;
- dev->base_addr = pci_resource_start(pdev, 0);
- dev->irq = pdev->irq;
- pci_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- /*
- * This will get the protocol layer ready and do any 1 time init's
- * Must have a valid sc and dev structure
- */
- lmc_proto_attach(sc);
-
-    /* Init the spin lock so we can call it later */
-
- spin_lock_init(&sc->lmc_lock);
- pci_set_master(pdev);
-
- printk(KERN_INFO "hdlc: detected at %lx, irq %d\n",
- dev->base_addr, dev->irq);
-
- err = register_hdlc_device(dev);
- if (err) {
- printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
- free_netdev(dev);
- return err;
- }
-
- sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
-
- /*
- *
-	 * Check either the subvendor or the subdevice; some systems reverse
-	 * the setting in the BIOS, which seems to be version and arch dependent.
-	 * Fix the error by exchanging the two values.
- */
- if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
- subdevice = pdev->subsystem_vendor;
-
- switch (subdevice) {
- case PCI_DEVICE_ID_LMC_HSSI:
- printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
- sc->lmc_media = &lmc_hssi_media;
- break;
- case PCI_DEVICE_ID_LMC_DS3:
- printk(KERN_INFO "%s: LMC DS3\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_DS3;
- sc->lmc_media = &lmc_ds3_media;
- break;
- case PCI_DEVICE_ID_LMC_SSI:
- printk(KERN_INFO "%s: LMC SSI\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_SSI;
- sc->lmc_media = &lmc_ssi_media;
- break;
- case PCI_DEVICE_ID_LMC_T1:
- printk(KERN_INFO "%s: LMC T1\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_T1;
- sc->lmc_media = &lmc_t1_media;
- break;
- default:
- printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
- unregister_hdlc_device(dev);
- return -EIO;
- break;
- }
-
- lmc_initcsrs (sc, dev->base_addr, 8);
-
- lmc_gpio_mkinput (sc, 0xff);
- sc->lmc_gpio = 0; /* drive no signals yet */
-
- sc->lmc_media->defaults (sc);
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
- /* verify that the PCI Sub System ID matches the Adapter Model number
- * from the MII register
- */
- AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
-
- if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
- subdevice != PCI_DEVICE_ID_LMC_T1) &&
- (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
- subdevice != PCI_DEVICE_ID_LMC_SSI) &&
- (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
- subdevice != PCI_DEVICE_ID_LMC_DS3) &&
- (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
- subdevice != PCI_DEVICE_ID_LMC_HSSI))
- printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
- " Subsystem ID = 0x%04x\n",
- dev->name, AdapModelNum, subdevice);
-
- /*
- * reset clock
- */
- LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
-
- sc->board_idx = cards_found++;
- sc->extra_stats.check = STATCHECK;
- sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
- sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
- sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
-
- sc->lmc_ok = 0;
- sc->last_link_status = 0;
-
- return 0;
-}
-
-/*
- * Called from pci when removing module.
- */
-static void lmc_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- printk(KERN_DEBUG "%s: removing...\n", dev->name);
- unregister_hdlc_device(dev);
- free_netdev(dev);
- }
-}
-
-/* After this is called, packets can be sent.
- * Does not initialize the addresses
- */
-static int lmc_open(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- int err;
-
- lmc_led_on(sc, LMC_DS3_LED0);
-
- lmc_dec_reset(sc);
- lmc_reset(sc);
-
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
- lmc_mii_readreg(sc, 0, 17));
-
- if (sc->lmc_ok)
- return 0;
-
- lmc_softreset (sc);
-
- /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
- if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
- printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
- return -EAGAIN;
- }
- sc->got_irq = 1;
-
- /* Assert Terminal Active */
- sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
- sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
- /*
- * reset to last state.
- */
- sc->lmc_media->set_status (sc, NULL);
-
- /* setup default bits to be used in tulip_desc_t transmit descriptor
- * -baz */
- sc->TxDescriptControlInit = (
- LMC_TDES_INTERRUPT_ON_COMPLETION
- | LMC_TDES_FIRST_SEGMENT
- | LMC_TDES_LAST_SEGMENT
- | LMC_TDES_SECOND_ADDR_CHAINED
- | LMC_TDES_DISABLE_PADDING
- );
-
- if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
- /* disable 32 bit CRC generated by ASIC */
- sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
- }
- sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
-    /* Acknowledge the Terminal Active and light LEDs */
-
- /* dev->flags |= IFF_UP; */
-
- if ((err = lmc_proto_open(sc)) != 0)
- return err;
-
- netif_start_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
- /*
- * select what interrupts we want to get
- */
- sc->lmc_intrmask = 0;
- /* Should be using the default interrupt mask defined in the .h file. */
- sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
- | TULIP_STS_RXINTR
- | TULIP_STS_TXINTR
- | TULIP_STS_ABNRMLINTR
- | TULIP_STS_SYSERROR
- | TULIP_STS_TXSTOPPED
- | TULIP_STS_TXUNDERFLOW
- | TULIP_STS_RXSTOPPED
- | TULIP_STS_RXNOBUF
- );
- LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
- sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
- sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- sc->lmc_ok = 1; /* Run watchdog */
-
- /*
- * Set the if up now - pfb
- */
-
- sc->last_link_status = 1;
-
- /*
- * Setup a timer for the watchdog on probe, and start it running.
- * Since lmc_ok == 0, it will be a NOP for now.
- */
- timer_setup(&sc->timer, lmc_watchdog, 0);
- sc->timer.expires = jiffies + HZ;
- add_timer (&sc->timer);
-
- return 0;
-}
-
-/* Total reset to compensate for the AdTran DSU doing bad things
- * under heavy load
- */
-
-static void lmc_running_reset (struct net_device *dev) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
-
- /* stop interrupts */
- /* Clear the interrupt mask */
- LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
- lmc_dec_reset (sc);
- lmc_reset (sc);
- lmc_softreset (sc);
- /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
- sc->lmc_media->set_link_status (sc, 1);
- sc->lmc_media->set_status (sc, NULL);
-
- netif_wake_queue(dev);
-
- sc->lmc_txfull = 0;
- sc->extra_stats.tx_tbusy0++;
-
- sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
- LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
- sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-}
-
-
-/* This is what is called when you ifconfig down a device.
- * This disables the timer for the watchdog and keepalives,
- * and disables the irq for dev.
- */
-static int lmc_close(struct net_device *dev)
-{
- /* not calling release_region() as we should */
- lmc_softc_t *sc = dev_to_sc(dev);
-
- sc->lmc_ok = 0;
- sc->lmc_media->set_link_status (sc, 0);
- del_timer (&sc->timer);
- lmc_proto_close(sc);
- lmc_ifdown (dev);
-
- return 0;
-}
-
-/* Ends the transfer of packets */
-/* When the interface goes down, this is called */
-static int lmc_ifdown (struct net_device *dev) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr6;
- int i;
-
- /* Don't let anything else go on right now */
- // dev->start = 0;
- netif_stop_queue(dev);
- sc->extra_stats.tx_tbusy1++;
-
- /* stop interrupts */
- /* Clear the interrupt mask */
- LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
- /* Stop Tx and Rx on the chip */
- csr6 = LMC_CSR_READ (sc, csr_command);
- csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
- csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
- LMC_CSR_WRITE (sc, csr_command, csr6);
-
- sc->lmc_device->stats.rx_missed_errors +=
- LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
- /* release the interrupt */
- if(sc->got_irq == 1){
- free_irq (dev->irq, dev);
- sc->got_irq = 0;
- }
-
- /* free skbuffs in the Rx queue */
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- struct sk_buff *skb = sc->lmc_rxq[i];
- sc->lmc_rxq[i] = NULL;
- sc->lmc_rxring[i].status = 0;
- sc->lmc_rxring[i].length = 0;
- sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
- if (skb != NULL)
- dev_kfree_skb(skb);
- sc->lmc_rxq[i] = NULL;
- }
-
- for (i = 0; i < LMC_TXDESCS; i++)
- {
- if (sc->lmc_txq[i] != NULL)
- dev_kfree_skb(sc->lmc_txq[i]);
- sc->lmc_txq[i] = NULL;
- }
-
- lmc_led_off (sc, LMC_MII16_LED_ALL);
-
- netif_wake_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
- return 0;
-}
-
-/* Interrupt handling routine. This will take an incoming packet, or clean
- * up after a transmit.
- */
-static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
-{
- struct net_device *dev = (struct net_device *) dev_instance;
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr;
- int i;
- s32 stat;
- unsigned int badtx;
- int max_work = LMC_RXDESCS;
- int handled = 0;
-
- spin_lock(&sc->lmc_lock);
-
- /*
- * Read the csr to find what interrupts we have (if any)
- */
- csr = LMC_CSR_READ (sc, csr_status);
-
- /*
- * Make sure this is our interrupt
- */
- if ( ! (csr & sc->lmc_intrmask)) {
- goto lmc_int_fail_out;
- }
-
- /* always go through this loop at least once */
- while (csr & sc->lmc_intrmask) {
- handled = 1;
-
- /*
-         * Clear interrupt bits; we handle all cases below
- */
- LMC_CSR_WRITE (sc, csr_status, csr);
-
- /*
- * One of
- * - Transmit process timed out CSR5<1>
- * - Transmit jabber timeout CSR5<3>
- * - Transmit underflow CSR5<5>
- * - Transmit Receiver buffer unavailable CSR5<7>
- * - Receive process stopped CSR5<8>
- * - Receive watchdog timeout CSR5<9>
- * - Early transmit interrupt CSR5<10>
- *
- * Is this really right? Should we do a running reset for jabber?
- * (being a WAN card and all)
- */
- if (csr & TULIP_STS_ABNRMLINTR){
- lmc_running_reset (dev);
- break;
- }
-
- if (csr & TULIP_STS_RXINTR)
- lmc_rx (dev);
-
- if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
-
- int n_compl = 0 ;
- /* reset the transmit timeout detection flag -baz */
- sc->extra_stats.tx_NoCompleteCnt = 0;
-
- badtx = sc->lmc_taint_tx;
- i = badtx % LMC_TXDESCS;
-
- while ((badtx < sc->lmc_next_tx)) {
- stat = sc->lmc_txring[i].status;
-
- LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
- sc->lmc_txring[i].length);
- /*
- * If bit 31 is 1 the tulip owns it break out of the loop
- */
- if (stat & 0x80000000)
- break;
-
- n_compl++ ; /* i.e., have an empty slot in ring */
- /*
- * If we have no skbuff or have cleared it
- * Already continue to the next buffer
- */
- if (sc->lmc_txq[i] == NULL)
- continue;
-
- /*
- * Check the total error summary to look for any errors
- */
- if (stat & 0x8000) {
- sc->lmc_device->stats.tx_errors++;
- if (stat & 0x4104)
- sc->lmc_device->stats.tx_aborted_errors++;
- if (stat & 0x0C00)
- sc->lmc_device->stats.tx_carrier_errors++;
- if (stat & 0x0200)
- sc->lmc_device->stats.tx_window_errors++;
- if (stat & 0x0002)
- sc->lmc_device->stats.tx_fifo_errors++;
- } else {
- sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
-
- sc->lmc_device->stats.tx_packets++;
- }
-
- dev_consume_skb_irq(sc->lmc_txq[i]);
- sc->lmc_txq[i] = NULL;
-
- badtx++;
- i = badtx % LMC_TXDESCS;
- }
-
- if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
- {
- printk ("%s: out of sync pointer\n", dev->name);
- badtx += LMC_TXDESCS;
- }
- LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
- sc->lmc_txfull = 0;
- netif_wake_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
-
-#ifdef DEBUG
- sc->extra_stats.dirtyTx = badtx;
- sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
- sc->extra_stats.lmc_txfull = sc->lmc_txfull;
-#endif
- sc->lmc_taint_tx = badtx;
-
- /*
- * Why was there a break here???
- */
- } /* end handle transmit interrupt */
-
- if (csr & TULIP_STS_SYSERROR) {
- u32 error;
- printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
- error = csr>>23 & 0x7;
- switch(error){
- case 0x000:
- printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
- break;
- case 0x001:
- printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
- break;
- case 0x002:
- printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
- break;
- default:
- printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
- }
- lmc_dec_reset (sc);
- lmc_reset (sc);
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2,
- lmc_mii_readreg (sc, 0, 16),
- lmc_mii_readreg (sc, 0, 17));
-
- }
-
-
- if(max_work-- <= 0)
- break;
-
- /*
- * Get current csr status to make sure
- * we've cleared all interrupts
- */
- csr = LMC_CSR_READ (sc, csr_status);
- } /* end interrupt loop */
- LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
-
-lmc_int_fail_out:
-
- spin_unlock(&sc->lmc_lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 flag;
- int entry;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- /* normal path, tbusy known to be zero */
-
- entry = sc->lmc_next_tx % LMC_TXDESCS;
-
- sc->lmc_txq[entry] = skb;
- sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
-
- LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
-
-#ifndef GCOM
- /* If the queue is less than half full, don't interrupt */
- if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
- {
- /* Do not interrupt on completion of this packet */
- flag = 0x60000000;
- netif_wake_queue(dev);
- }
- else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
- {
- /* This generates an interrupt on completion of this packet */
- flag = 0xe0000000;
- netif_wake_queue(dev);
- }
- else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
- {
- /* Do not interrupt on completion of this packet */
- flag = 0x60000000;
- netif_wake_queue(dev);
- }
- else
- {
- /* This generates an interrupt on completion of this packet */
- flag = 0xe0000000;
- sc->lmc_txfull = 1;
- netif_stop_queue(dev);
- }
-#else
- flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
-
- if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
- { /* ring full, go busy */
- sc->lmc_txfull = 1;
- netif_stop_queue(dev);
- sc->extra_stats.tx_tbusy1++;
- LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
- }
-#endif
-
-
- if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */
- flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */
-
- /* don't pad small packets either */
- flag = sc->lmc_txring[entry].length = (skb->len) | flag |
- sc->TxDescriptControlInit;
-
- /* set the transmit timeout flag to be checked in
- * the watchdog timer handler. -baz
- */
-
- sc->extra_stats.tx_NoCompleteCnt++;
- sc->lmc_next_tx++;
-
- /* give ownership to the chip */
- LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
- sc->lmc_txring[entry].status = 0x80000000;
-
- /* send now! */
- LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-
-static int lmc_rx(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- int i;
- int rx_work_limit = LMC_RXDESCS;
- int rxIntLoopCnt; /* debug -baz */
- int localLengthErrCnt = 0;
- long stat;
- struct sk_buff *skb, *nsb;
- u16 len;
-
- lmc_led_on(sc, LMC_DS3_LED3);
-
- rxIntLoopCnt = 0; /* debug -baz */
-
- i = sc->lmc_next_rx % LMC_RXDESCS;
-
- while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
- {
- rxIntLoopCnt++; /* debug -baz */
- len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
- if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
- if ((stat & 0x0000ffff) != 0x7fff) {
- /* Oversized frame */
- sc->lmc_device->stats.rx_length_errors++;
- goto skip_packet;
- }
- }
-
- if (stat & 0x00000008) { /* Catch a dribbling bit error */
- sc->lmc_device->stats.rx_errors++;
- sc->lmc_device->stats.rx_frame_errors++;
- goto skip_packet;
- }
-
-
- if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
- sc->lmc_device->stats.rx_errors++;
- sc->lmc_device->stats.rx_crc_errors++;
- goto skip_packet;
- }
-
- if (len > LMC_PKT_BUF_SZ) {
- sc->lmc_device->stats.rx_length_errors++;
- localLengthErrCnt++;
- goto skip_packet;
- }
-
- if (len < sc->lmc_crcSize + 2) {
- sc->lmc_device->stats.rx_length_errors++;
- sc->extra_stats.rx_SmallPktCnt++;
- localLengthErrCnt++;
- goto skip_packet;
- }
-
- if(stat & 0x00004000){
- printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
- }
-
- len -= sc->lmc_crcSize;
-
- skb = sc->lmc_rxq[i];
-
- /*
- * We ran out of memory at some point
- * just allocate an skb buff and continue.
- */
-
- if (!skb) {
- nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if (nsb) {
- sc->lmc_rxq[i] = nsb;
- nsb->dev = dev;
- sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
- }
- sc->failed_recv_alloc = 1;
- goto skip_packet;
- }
-
- sc->lmc_device->stats.rx_packets++;
- sc->lmc_device->stats.rx_bytes += len;
-
- LMC_CONSOLE_LOG("recv", skb->data, len);
-
- /*
- * I'm not sure of the sanity of this
- * Packets could be arriving at a constant
- * 44.210mbits/sec and we're going to copy
- * them into a new buffer??
- */
-
- if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
- /*
- * If it's a large packet don't copy it just hand it up
- */
- give_it_anyways:
-
- sc->lmc_rxq[i] = NULL;
- sc->lmc_rxring[i].buffer1 = 0x0;
-
- skb_put (skb, len);
- skb->protocol = lmc_proto_type(sc, skb);
- skb_reset_mac_header(skb);
- /* skb_reset_network_header(skb); */
- skb->dev = dev;
- lmc_proto_netif(sc, skb);
-
- /*
- * This skb will be destroyed by the upper layers, make a new one
- */
- nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if (nsb) {
- sc->lmc_rxq[i] = nsb;
- nsb->dev = dev;
- sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
- /* Transferred to 21140 below */
- }
- else {
- /*
- * We've run out of memory, stop trying to allocate
- * memory and exit the interrupt handler
- *
- * The chip may run out of receivers and stop
-                     * in which case we'll try to allocate the buffer
- * again. (once a second)
- */
- sc->extra_stats.rx_BuffAllocErr++;
- LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
- sc->failed_recv_alloc = 1;
- goto skip_out_of_mem;
- }
- }
- else {
- nsb = dev_alloc_skb(len);
- if(!nsb) {
- goto give_it_anyways;
- }
- skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
-
- nsb->protocol = lmc_proto_type(sc, nsb);
- skb_reset_mac_header(nsb);
- /* skb_reset_network_header(nsb); */
- nsb->dev = dev;
- lmc_proto_netif(sc, nsb);
- }
-
- skip_packet:
- LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
- sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
-
- sc->lmc_next_rx++;
- i = sc->lmc_next_rx % LMC_RXDESCS;
- rx_work_limit--;
- if (rx_work_limit < 0)
- break;
- }
-
- /* detect condition for LMC1000 where DSU cable attaches and fills
- * descriptors with bogus packets
- *
- if (localLengthErrCnt > LMC_RXDESCS - 3) {
- sc->extra_stats.rx_BadPktSurgeCnt++;
- LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
- sc->extra_stats.rx_BadPktSurgeCnt);
- } */
-
- /* save max count of receive descriptors serviced */
- if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
- sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
-
-#ifdef DEBUG
- if (rxIntLoopCnt == 0)
- {
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
- != DESC_OWNED_BY_DC21X4)
- {
- rxIntLoopCnt++;
- }
- }
- LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
- }
-#endif
-
-
- lmc_led_off(sc, LMC_DS3_LED3);
-
-skip_out_of_mem:
- return 0;
-}
-
-static struct net_device_stats *lmc_get_stats(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- return &sc->lmc_device->stats;
-}
-
-static struct pci_driver lmc_driver = {
- .name = "lmc",
- .id_table = lmc_pci_tbl,
- .probe = lmc_init_one,
- .remove = lmc_remove_one,
-};
-
-module_pci_driver(lmc_driver);
-
-unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
-{
- int i;
- int command = (0xf6 << 10) | (devaddr << 5) | regno;
- int retval = 0;
-
- LMC_MII_SYNC (sc);
-
- for (i = 15; i >= 0; i--)
- {
- int dataval = (command & (1 << i)) ? 0x20000 : 0;
-
- LMC_CSR_WRITE (sc, csr_9, dataval);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- }
-
- for (i = 19; i > 0; i--)
- {
- LMC_CSR_WRITE (sc, csr_9, 0x40000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
- LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- }
-
- return (retval >> 1) & 0xffff;
-}
-
-void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
-{
- int i = 32;
- int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
-
- LMC_MII_SYNC (sc);
-
- i = 31;
- while (i >= 0)
- {
- int datav;
-
- if (command & (1 << i))
- datav = 0x20000;
- else
- datav = 0x00000;
-
- LMC_CSR_WRITE (sc, csr_9, datav);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- i--;
- }
-
- i = 2;
- while (i > 0)
- {
- LMC_CSR_WRITE (sc, csr_9, 0x40000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, 0x50000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- i--;
- }
-}
-
-static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
-{
- int i;
-
- /* Initialize the receive rings and buffers. */
- sc->lmc_txfull = 0;
- sc->lmc_next_rx = 0;
- sc->lmc_next_tx = 0;
- sc->lmc_taint_rx = 0;
- sc->lmc_taint_tx = 0;
-
- /*
- * Setup each one of the receiver buffers
- * allocate an skbuff for each one, setup the descriptor table
- * and point each buffer at the next one
- */
-
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- struct sk_buff *skb;
-
- if (sc->lmc_rxq[i] == NULL)
- {
- skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if(skb == NULL){
- printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
- sc->failed_ring = 1;
- break;
- }
- else{
- sc->lmc_rxq[i] = skb;
- }
- }
- else
- {
- skb = sc->lmc_rxq[i];
- }
-
- skb->dev = sc->lmc_device;
-
- /* owned by 21140 */
- sc->lmc_rxring[i].status = 0x80000000;
-
- /* used to be PKT_BUF_SZ; now uses the skb tailroom since we lose some to headroom */
- sc->lmc_rxring[i].length = skb_tailroom(skb);
-
- /* used to be skb->tail, which looks odd (why write to
- * the end of the packet?), but since nothing is there yet tail == data
- */
- sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
-
- /* This is fair since the structure is static and we have the next address */
- sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
-
- }
-
- /*
- * Sets end of ring
- */
- if (i != 0) {
- sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
- sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
- }
- LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
-
- /* Initialize the transmit rings and buffers */
- for (i = 0; i < LMC_TXDESCS; i++)
- {
- if (sc->lmc_txq[i] != NULL){ /* have buffer */
- dev_kfree_skb(sc->lmc_txq[i]); /* free it */
- sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
- }
- sc->lmc_txq[i] = NULL;
- sc->lmc_txring[i].status = 0x00000000;
- sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
- }
- sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
- LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
-}
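
lmc_softreset() chains every descriptor to the next one through buffer2 and then closes the receive ring by OR-ing 0x02000000 (LMC_TDES_END_OF_RING in lmc_var.h) into the last descriptor's length field and pointing its buffer2 back at entry 0. A minimal standalone sketch of that ring-closing step, using a plain integer in place of virt_to_bus():

/* Standalone sketch only: the descriptor layout mirrors struct tulip_desc_t,
 * and ring_bus_addr stands in for virt_to_bus(ring).
 */
#include <stdint.h>

struct desc {
        int32_t  status;
        int32_t  length;
        uint32_t buffer1;
        uint32_t buffer2;
};

#define END_OF_RING 0x02000000          /* LMC_TDES_END_OF_RING */

void close_ring(struct desc *ring, int n, uint32_t ring_bus_addr)
{
        for (int i = 0; i < n; i++)     /* chain each entry to the next one */
                ring[i].buffer2 = ring_bus_addr + (uint32_t)(i + 1) * sizeof(*ring);

        ring[n - 1].length |= END_OF_RING;   /* flag the last descriptor */
        ring[n - 1].buffer2 = ring_bus_addr; /* and wrap back to entry 0 */
}
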
-
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
- sc->lmc_gpio_io &= ~bits;
- LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
-
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
- sc->lmc_gpio_io |= bits;
- LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
-
-void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
- if ((~sc->lmc_miireg16) & led) /* Already on! */
- return;
-
- sc->lmc_miireg16 &= ~led;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
-
-void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
- if (sc->lmc_miireg16 & led) /* Already set, don't do anything */
- return;
-
- sc->lmc_miireg16 |= led;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
-{
- sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- /*
- * make some of the GPIO pins be outputs
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
- /*
- * RESET low to force state reset. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
- sc->lmc_gpio &= ~(LMC_GEP_RESET);
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, LMC_GEP_RESET);
-
- /*
- * Call media specific init routine
- */
- sc->lmc_media->init(sc);
-
- sc->extra_stats.resetCount++;
-}
-
-static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
-{
- u32 val;
-
- /*
- * disable all interrupts
- */
- sc->lmc_intrmask = 0;
- LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
-
- /*
- * Reset the chip with a software reset command.
- * Wait 10 microseconds (actually 50 PCI cycles, which at
- * 33MHz comes to about two microseconds, but wait a
- * bit longer anyway)
- */
- LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
- udelay(25);
-#ifdef __sparc__
- sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
- sc->lmc_busmode = 0x00100000;
- sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
- LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
-#endif
- sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
-
- /*
- * We want:
- * no ethernet address in frames we write
- * disable padding (txdesc, padding disable)
- * ignore runt frames (rdes0 bit 15)
- * no receiver watchdog or transmitter jabber timer
- * (csr15 bit 0,14 == 1)
- * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
- */
-
- sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
- | TULIP_CMD_FULLDUPLEX
- | TULIP_CMD_PASSBADPKT
- | TULIP_CMD_NOHEARTBEAT
- | TULIP_CMD_PORTSELECT
- | TULIP_CMD_RECEIVEALL
- | TULIP_CMD_MUSTBEONE
- );
- sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
- | TULIP_CMD_THRESHOLDCTL
- | TULIP_CMD_STOREFWD
- | TULIP_CMD_TXTHRSHLDCTL
- );
-
- LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
-
- /*
- * disable receiver watchdog and transmit jabber
- */
- val = LMC_CSR_READ(sc, csr_sia_general);
- val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
- LMC_CSR_WRITE(sc, csr_sia_general, val);
-}
-
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
- size_t csr_size)
-{
- sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size;
- sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size;
- sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size;
- sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size;
- sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size;
- sc->lmc_csrs.csr_status = csr_base + 5 * csr_size;
- sc->lmc_csrs.csr_command = csr_base + 6 * csr_size;
- sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size;
- sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size;
- sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size;
- sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size;
- sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size;
- sc->lmc_csrs.csr_12 = csr_base + 12 * csr_size;
- sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size;
- sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size;
- sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size;
-}
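
lmc_initcsrs() lays the sixteen Tulip CSRs out at csr_base + n * csr_size; the LMC_CSR_READ/LMC_CSR_WRITE macros in lmc_var.h then treat each of those addresses as an I/O port for inl()/outl(). A tiny standalone illustration of the spacing, with a made-up base address and stride:

/* The 0xec00 base and 8-byte stride below are hypothetical values,
 * used only to show how the CSR addresses are spaced.
 */
#include <stdio.h>

int main(void)
{
        unsigned long csr_base = 0xec00;    /* hypothetical I/O BAR */
        unsigned long csr_size = 8;         /* register stride      */

        for (int n = 0; n < 16; n++)
                printf("CSR%-2d at 0x%lx\n", n, csr_base + n * csr_size);
        return 0;
}
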
-
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr6;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- printk("%s: Xmitter busy|\n", dev->name);
-
- sc->extra_stats.tx_tbusy_calls++;
- if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT))
- goto bug_out;
-
- /*
- * Chip seems to have locked up
- * Reset it
- * This wipes out all our descriptor
- * tables and starts from scratch
- */
-
- LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
- LMC_CSR_READ (sc, csr_status),
- sc->extra_stats.tx_ProcTimeout);
-
- lmc_running_reset (dev);
-
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2,
- lmc_mii_readreg (sc, 0, 16),
- lmc_mii_readreg (sc, 0, 17));
-
- /* restart the tx processes */
- csr6 = LMC_CSR_READ (sc, csr_command);
- LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
- LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
-
- /* immediate transmit */
- LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
- sc->lmc_device->stats.tx_errors++;
- sc->extra_stats.tx_ProcTimeout++; /* -baz */
-
- netif_trans_update(dev); /* prevent tx timeout */
-
-bug_out:
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
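
The two csr_command writes in lmc_driver_timeout() match the CSR6 bit definitions removed from lmc_var.h further down: 0x0002 is TULIP_CMD_RXRUN and 0x2000 is TULIP_CMD_TXRUN, so the 0x2002 write turns both DMA engines back on before the transmit poll demand. A sketch of the same sequence spelled with the named constants, assuming the driver's lmc_softc_t and CSR helpers are in scope:

/* Sketch only: same register writes as lmc_driver_timeout(), with the
 * magic numbers replaced by the CSR6 bit names from lmc_var.h.
 */
static void lmc_restart_tx_rx(lmc_softc_t * const sc)
{
        u32 csr6 = LMC_CSR_READ(sc, csr_command);

        LMC_CSR_WRITE(sc, csr_command, csr6 | TULIP_CMD_RXRUN);
        LMC_CSR_WRITE(sc, csr_command, csr6 | TULIP_CMD_RXRUN | TULIP_CMD_TXRUN);

        LMC_CSR_WRITE(sc, csr_txpoll, 0);       /* immediate transmit poll */
}
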
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
deleted file mode 100644
index ec1ac7b1f3fd..000000000000
--- a/drivers/net/wan/lmc/lmc_media.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/uaccess.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-
-#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-/*
- * protocol independent method.
- */
-static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-static void lmc_ds3_init (lmc_softc_t * const);
-static void lmc_ds3_default (lmc_softc_t * const);
-static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ds3_set_100ft (lmc_softc_t * const, int);
-static int lmc_ds3_get_link_status (lmc_softc_t * const);
-static void lmc_ds3_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ds3_set_scram (lmc_softc_t * const, int);
-static void lmc_ds3_watchdog (lmc_softc_t * const);
-
-static void lmc_hssi_init (lmc_softc_t * const);
-static void lmc_hssi_default (lmc_softc_t * const);
-static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_hssi_set_clock (lmc_softc_t * const, int);
-static int lmc_hssi_get_link_status (lmc_softc_t * const);
-static void lmc_hssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_hssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_hssi_watchdog (lmc_softc_t * const);
-
-static void lmc_ssi_init (lmc_softc_t * const);
-static void lmc_ssi_default (lmc_softc_t * const);
-static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ssi_set_clock (lmc_softc_t * const, int);
-static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_ssi_get_link_status (lmc_softc_t * const);
-static void lmc_ssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_ssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ssi_watchdog (lmc_softc_t * const);
-
-static void lmc_t1_init (lmc_softc_t * const);
-static void lmc_t1_default (lmc_softc_t * const);
-static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_t1_get_link_status (lmc_softc_t * const);
-static void lmc_t1_set_circuit_type (lmc_softc_t * const, int);
-static void lmc_t1_set_crc_length (lmc_softc_t * const, int);
-static void lmc_t1_set_clock (lmc_softc_t * const, int);
-static void lmc_t1_watchdog (lmc_softc_t * const);
-
-static void lmc_dummy_set_1 (lmc_softc_t * const, int);
-static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
-
-static inline void write_av9110_bit (lmc_softc_t *, int);
-static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
-
-lmc_media_t lmc_ds3_media = {
- .init = lmc_ds3_init, /* special media init stuff */
- .defaults = lmc_ds3_default, /* reset to default state */
- .set_status = lmc_ds3_set_status, /* reset status to state provided */
- .set_clock_source = lmc_dummy_set_1, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
- .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
- .get_link_status = lmc_ds3_get_link_status, /* get link status */
- .set_link_status = lmc_dummy_set_1, /* set link status */
- .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_ds3_watchdog
-};
-
-lmc_media_t lmc_hssi_media = {
- .init = lmc_hssi_init, /* special media init stuff */
- .defaults = lmc_hssi_default, /* reset to default state */
- .set_status = lmc_hssi_set_status, /* reset status to state provided */
- .set_clock_source = lmc_hssi_set_clock, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_hssi_get_link_status, /* get link status */
- .set_link_status = lmc_hssi_set_link_status, /* set link status */
- .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_hssi_watchdog
-};
-
-lmc_media_t lmc_ssi_media = {
- .init = lmc_ssi_init, /* special media init stuff */
- .defaults = lmc_ssi_default, /* reset to default state */
- .set_status = lmc_ssi_set_status, /* reset status to state provided */
- .set_clock_source = lmc_ssi_set_clock, /* set clock source */
- .set_speed = lmc_ssi_set_speed, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_ssi_get_link_status, /* get link status */
- .set_link_status = lmc_ssi_set_link_status, /* set link status */
- .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_ssi_watchdog
-};
-
-lmc_media_t lmc_t1_media = {
- .init = lmc_t1_init, /* special media init stuff */
- .defaults = lmc_t1_default, /* reset to default state */
- .set_status = lmc_t1_set_status, /* reset status to state provided */
- .set_clock_source = lmc_t1_set_clock, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_t1_get_link_status, /* get link status */
- .set_link_status = lmc_dummy_set_1, /* set link status */
- .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
- .watchdog = lmc_t1_watchdog
-};
-
-static void
-lmc_dummy_set_1 (lmc_softc_t * const sc, int a)
-{
-}
-
-static void
-lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a)
-{
-}
-
-/*
- * HSSI methods
- */
-
-static void
-lmc_hssi_init (lmc_softc_t * const sc)
-{
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200;
-
- lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK);
-}
-
-static void
-lmc_hssi_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in clock source
- */
- if (ctl->clock_source && !sc->ictl.clock_source)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
- }
- else if (!ctl->clock_source && sc->ictl.clock_source)
- {
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- }
-
- lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = sc->ictl.clock_source;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_hssi_get_link_status (lmc_softc_t * const sc)
-{
- /*
- * We're using the same code as SSI since
- * they're practically the same
- */
- return lmc_ssi_get_link_status(sc);
-}
-
-static void
-lmc_hssi_set_link_status (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_LINK_UP)
- sc->lmc_miireg16 |= LMC_MII16_HSSI_TA;
- else
- sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA;
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_hssi_watchdog (lmc_softc_t * const sc)
-{
- /* HSSI is blank */
-}
-
-/*
- * DS3 methods
- */
-
-/*
- * Set cable length
- */
-static void
-lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT)
- {
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO;
- sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT;
- }
- else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT)
- {
- sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO;
- sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT;
- }
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
- sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
- sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in cable length setting
- */
- if (ctl->cable_length && !sc->ictl.cable_length)
- lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
- else if (!ctl->cable_length && sc->ictl.cable_length)
- lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
-
- /*
- * Check for change in scrambler setting (requires reset)
- */
- if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
- lmc_ds3_set_scram (sc, LMC_CTL_ON);
- else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
- lmc_ds3_set_scram (sc, LMC_CTL_OFF);
-
- lmc_set_protocol (sc, ctl);
-}
-
-static void
-lmc_ds3_init (lmc_softc_t * const sc)
-{
- int i;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;
-
- /* writes zeros everywhere */
- for (i = 0; i < 21; i++)
- {
- lmc_mii_writereg (sc, 0, 17, i);
- lmc_mii_writereg (sc, 0, 18, 0);
- }
-
- /* set some essential bits */
- lmc_mii_writereg (sc, 0, 17, 1);
- lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */
-
- lmc_mii_writereg (sc, 0, 17, 5);
- lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */
-
- lmc_mii_writereg (sc, 0, 17, 14);
- lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */
-
- /* clear counters and latched bits */
- for (i = 0; i < 21; i++)
- {
- lmc_mii_writereg (sc, 0, 17, i);
- lmc_mii_readreg (sc, 0, 18);
- }
-}
-
-/*
- * 1 == DS3 payload scrambled, 0 == not scrambled
- */
-static void
-lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_ON)
- {
- sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
- sc->ictl.scrambler_onoff = LMC_CTL_ON;
- }
- else
- {
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
- sc->ictl.scrambler_onoff = LMC_CTL_OFF;
- }
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ds3_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status, link_status_11;
- int ret = 1;
-
- lmc_mii_writereg (sc, 0, 17, 7);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
- /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
- * led0 yellow = far-end adapter is in Red alarm condition
- * led1 blue = received an Alarm Indication signal
- * (upstream failure)
- * led2 Green = power to adapter, Gate Array loaded & driver
- * attached
- * led3 red = Loss of Signal (LOS) or out of frame (OOF)
- * conditions detected on T3 receive signal
- */
-
- lmc_led_on(sc, LMC_DS3_LED2);
-
- if ((link_status & LMC_FRAMER_REG0_DLOS) ||
- (link_status & LMC_FRAMER_REG0_OOFS)){
- ret = 0;
- if(sc->last_led_err[3] != 1){
- u16 r1;
- lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
- r1 = lmc_mii_readreg (sc, 0, 18);
- r1 &= 0xfe;
- lmc_mii_writereg(sc, 0, 18, r1);
- printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */
- sc->last_led_err[3] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED3); /* turn off red LED */
- if(sc->last_led_err[3] == 1){
- u16 r1;
- lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
- r1 = lmc_mii_readreg (sc, 0, 18);
- r1 |= 0x01;
- lmc_mii_writereg(sc, 0, 18, r1);
- }
- sc->last_led_err[3] = 0;
- }
-
- lmc_mii_writereg(sc, 0, 17, 0x10);
- link_status_11 = lmc_mii_readreg(sc, 0, 18);
- if((link_status & LMC_FRAMER_REG0_AIS) ||
- (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
- ret = 0;
- if(sc->last_led_err[0] != 1){
- printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
- printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 0;
- }
-
- lmc_mii_writereg (sc, 0, 17, 9);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
- if(link_status & LMC_FRAMER_REG9_RBLUE){
- ret = 0;
- if(sc->last_led_err[1] != 1){
- printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 0;
- }
-
- return ret;
-}
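
Every framer access in the DS3 (and T1) link-status code follows the same indirect pattern: the framer register number goes into MII register 17 and the data is then read or written through MII register 18, exactly as the comment above lmc_t1_write() describes further down. A hedged sketch of that pattern as a pair of helpers; the names are illustrative, and the driver itself open-codes the reads:

/* Illustrative helpers only, assuming the driver's MII accessors. */
static unsigned lmc_framer_read(lmc_softc_t * const sc, int reg)
{
        lmc_mii_writereg(sc, 0, 17, reg);       /* select framer register */
        return lmc_mii_readreg(sc, 0, 18);      /* fetch its contents     */
}

static void lmc_framer_write(lmc_softc_t * const sc, int reg, int val)
{
        lmc_mii_writereg(sc, 0, 17, reg);       /* select framer register */
        lmc_mii_writereg(sc, 0, 18, val);       /* store the new value    */
}
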
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_watchdog (lmc_softc_t * const sc)
-{
-
-}
-
-
-/*
- * SSI methods
- */
-
-static void lmc_ssi_init(lmc_softc_t * const sc)
-{
- u16 mii17;
- int cable;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
-
- mii17 = lmc_mii_readreg(sc, 0, 17);
-
- cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
- sc->ictl.cable_type = cable;
-
- lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
-}
-
-static void
-lmc_ssi_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- /*
- * make TXCLOCK always be an output
- */
- lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_media->set_speed (sc, NULL);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
- sc->lmc_media->set_speed (sc, &sc->ictl);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in clock source
- */
- if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT
- && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
- }
- else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT
- && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
- }
-
- if (ctl->clock_rate != sc->ictl.clock_rate)
- sc->lmc_media->set_speed (sc, ctl);
-
- lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_ssi_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = ie;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(ie != old)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(ie != old)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-static void
-lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- lmc_ctl_t *ictl = &sc->ictl;
- lmc_av9110_t *av;
-
- /* The original settings for a clock rate of
- * 100 kHz (8,25,0,0,2) were incorrect;
- * they should have been 80,125,1,3,3.
- * There are 17 param combinations that produce this freq.
- * For 1.5 MHz use 120,100,1,1,2 (226 param combinations).
- */
- if (ctl == NULL)
- {
- av = &ictl->cardspec.ssi;
- ictl->clock_rate = 1500000;
- av->f = ictl->clock_rate;
- av->n = 120;
- av->m = 100;
- av->v = 1;
- av->x = 1;
- av->r = 2;
-
- write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
- return;
- }
-
- av = &ctl->cardspec.ssi;
-
- if (av->f == 0)
- return;
-
- ictl->clock_rate = av->f; /* really, this is the rate we are */
- ictl->cardspec.ssi = *av;
-
- write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ssi_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status;
- u32 ticks;
- int ret = 1;
- int hw_hdsk = 1;
-
- /*
- * missing CTS? Hmm. If we require CTS on, we may never get the
- * link to come up, so omit it in this test.
- *
- * Also, it seems that with a loopback cable, DCD isn't asserted,
- * so just check for things like this:
- * DSR _must_ be asserted.
- * One of DCD or CTS must be asserted.
- */
-
- /* LMC 1000 (SSI) LED definitions
- * led0 Green = power to adapter, Gate Array loaded &
- * driver attached
- * led1 Green = DSR and DTR and RTS and CTS are set
- * led2 Green = Cable detected
- * led3 red = No timing is available from the
- * cable or the on-board frequency
- * generator.
- */
-
- link_status = lmc_mii_readreg (sc, 0, 16);
-
- /* Is the transmit clock still available */
- ticks = LMC_CSR_READ (sc, csr_gp_timer);
- ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
- lmc_led_on (sc, LMC_MII16_LED0);
-
- /* ====== transmit clock determination ===== */
- if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) {
- lmc_led_off(sc, LMC_MII16_LED3);
- }
- else if (ticks == 0 ) { /* no clock found ? */
- ret = 0;
- if (sc->last_led_err[3] != 1) {
- sc->extra_stats.tx_lossOfClockCnt++;
- printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
- }
- sc->last_led_err[3] = 1;
- lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
- }
- else {
- if(sc->last_led_err[3] == 1)
- printk(KERN_WARNING "%s: Clock Returned\n", sc->name);
- sc->last_led_err[3] = 0;
- lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */
- }
-
- if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */
- ret = 0;
- hw_hdsk = 0;
- }
-
-#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE
- if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){
- ret = 0;
- hw_hdsk = 0;
- }
-#endif
-
- if(hw_hdsk == 0){
- if(sc->last_led_err[1] != 1)
- printk(KERN_WARNING "%s: DSR not asserted\n", sc->name);
- sc->last_led_err[1] = 1;
- lmc_led_off(sc, LMC_MII16_LED1);
- }
- else {
- if(sc->last_led_err[1] != 0)
- printk(KERN_WARNING "%s: DSR now asserted\n", sc->name);
- sc->last_led_err[1] = 0;
- lmc_led_on(sc, LMC_MII16_LED1);
- }
-
- if(ret == 1) {
- lmc_led_on(sc, LMC_MII16_LED2); /* Over all good status? */
- }
-
- return ret;
-}
-
-static void
-lmc_ssi_set_link_status (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_LINK_UP)
- {
- sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
- printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
- printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS);
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_SSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * These are bits to program the ssi frequency generator
- */
-static inline void
-write_av9110_bit (lmc_softc_t * sc, int c)
-{
- /*
- * set the data bit as we need it.
- */
- sc->lmc_gpio &= ~(LMC_GEP_CLK);
- if (c & 0x01)
- sc->lmc_gpio |= LMC_GEP_DATA;
- else
- sc->lmc_gpio &= ~(LMC_GEP_DATA);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * set the clock to high
- */
- sc->lmc_gpio |= LMC_GEP_CLK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * set the clock to low again.
- */
- sc->lmc_gpio &= ~(LMC_GEP_CLK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-}
-
-static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
-{
- int i;
-
-#if 0
- printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n",
- LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r);
-#endif
-
- sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR;
- sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK
- * as outputs.
- */
- lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK
- | LMC_GEP_SSI_GENERATOR));
-
- sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * a shifting we will go...
- */
- for (i = 0; i < 7; i++)
- write_av9110_bit (sc, n >> i);
- for (i = 0; i < 7; i++)
- write_av9110_bit (sc, m >> i);
- for (i = 0; i < 1; i++)
- write_av9110_bit (sc, v >> i);
- for (i = 0; i < 2; i++)
- write_av9110_bit (sc, x >> i);
- for (i = 0; i < 2; i++)
- write_av9110_bit (sc, r >> i);
- for (i = 0; i < 5; i++)
- write_av9110_bit (sc, 0x17 >> i);
-
- /*
- * stop driving serial-related signals
- */
- lmc_gpio_mkinput (sc,
- (LMC_GEP_DATA | LMC_GEP_CLK
- | LMC_GEP_SSI_GENERATOR));
-}
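
write_av9110() shifts 24 bits into the AV9110 frequency generator, one field at a time and LSB-first: 7 bits of N, 7 of M, 1 of V, 2 of X, 2 of R, then a fixed 5-bit 0x17 trailer. A standalone sketch that packs the same stream into an array so the ordering is visible; the field widths come straight from the loop bounds above, while the meaning of the trailer is not documented here:

/* Standalone illustration of the AV9110 bit ordering used above. */
#include <stdint.h>

int av9110_pack(uint8_t *bits, uint32_t n, uint32_t m,
                uint32_t v, uint32_t x, uint32_t r)
{
        const struct { uint32_t val; int width; } field[] = {
                { n, 7 }, { m, 7 }, { v, 1 }, { x, 2 }, { r, 2 }, { 0x17, 5 },
        };
        int out = 0;

        for (unsigned f = 0; f < sizeof(field) / sizeof(field[0]); f++)
                for (int i = 0; i < field[f].width; i++)
                        bits[out++] = (uint8_t)((field[f].val >> i) & 1); /* LSB first */

        return out;     /* 24 bits total, one per write_av9110_bit() call */
}
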
-
-static void lmc_ssi_watchdog(lmc_softc_t * const sc)
-{
- u16 mii17 = lmc_mii_readreg(sc, 0, 17);
- if (((mii17 >> 3) & 7) == 7)
- lmc_led_off(sc, LMC_MII16_LED2);
- else
- lmc_led_on(sc, LMC_MII16_LED2);
-}
-
-/*
- * T1 methods
- */
-
-/*
- * The framer regs are multiplexed through MII regs 17 & 18:
- * write the register address to MII reg 17 and the data to MII reg 18.
- */
-static void
-lmc_t1_write (lmc_softc_t * const sc, int a, int d)
-{
- lmc_mii_writereg (sc, 0, 17, a);
- lmc_mii_writereg (sc, 0, 18, d);
-}
-
-/* Save a warning
-static int
-lmc_t1_read (lmc_softc_t * const sc, int a)
-{
- lmc_mii_writereg (sc, 0, 17, a);
- return lmc_mii_readreg (sc, 0, 18);
-}
-*/
-
-
-static void
-lmc_t1_init (lmc_softc_t * const sc)
-{
- u16 mii16;
- int i;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
- mii16 = lmc_mii_readreg (sc, 0, 16);
-
- /* reset 8370 */
- mii16 &= ~LMC_MII16_T1_RST;
- lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST);
- lmc_mii_writereg (sc, 0, 16, mii16);
-
- /* set T1 or E1 line. Uses sc->lmc_miireg16 in the function, so update it */
- sc->lmc_miireg16 = mii16;
- lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1);
- mii16 = sc->lmc_miireg16;
-
- lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */
- lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */
- lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */
- lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */
- lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */
- lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */
- lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */
- lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */
- lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */
- lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */
- lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */
- lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */
- lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */
- lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */
- lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */
- lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */
- lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */
- lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */
- lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */
- lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */
- lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */
- lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */
- lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */
- lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */
- lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */
- lmc_t1_write (sc, 0xD0, 0x47); /* SBI_CR - sys bus iface config */
- lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */
- lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */
- for (i = 0; i < 32; i++)
- {
- lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus per-channel ctl */
- lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */
- lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */
- }
- for (i = 1; i < 25; i++)
- {
- lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */
- }
-
- mii16 |= LMC_MII16_T1_XOE;
- lmc_mii_writereg (sc, 0, 16, mii16);
- sc->lmc_miireg16 = mii16;
-}
-
-static void
-lmc_t1_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
- /* Right now we can only clock from our internal source */
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
-}
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
- /*
- * check for change in circuit type
- */
- if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1
- && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1)
- sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1);
- else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1
- && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1)
- sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
- lmc_set_protocol (sc, ctl);
-}
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_t1_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status;
- int ret = 1;
-
- /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
- * led0 yellow = far-end adapter is in Red alarm condition
- * led1 blue = received an Alarm Indication signal
- * (upstream failure)
- * led2 Green = power to adapter, Gate Array loaded & driver
- * attached
- * led3 red = Loss of Signal (LOS) or out of frame (OOF)
- * conditions detected on T3 receive signal
- */
- lmc_led_on(sc, LMC_DS3_LED2);
-
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
-
- if (link_status & T1F_RAIS) { /* turn on blue LED */
- ret = 0;
- if(sc->last_led_err[1] != 1){
- printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 1;
- }
- else {
- if(sc->last_led_err[1] != 0){
- printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name);
- }
- lmc_led_off (sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 0;
- }
-
- /*
- * Yellow Alarm is nasty evil stuff: it looks at data patterns
- * inside the channel and confuses them with HDLC framing, so
- * ignore all yellow alarms.
- *
- * Do listen to the MultiFrame Yellow alarm, which, while implemented
- * in different ways, isn't in the channel and is hence somewhat
- * more reliable.
- */
-
- if (link_status & T1F_RMYEL) {
- ret = 0;
- if(sc->last_led_err[0] != 1){
- printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 1;
- }
- else {
- if(sc->last_led_err[0] != 0){
- printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name);
- }
- lmc_led_off(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 0;
- }
-
- /*
- * Loss of signal and loss of frame
- * Use the green bit to identify which one lit the led
- */
- if(link_status & T1F_RLOF){
- ret = 0;
- if(sc->last_led_err[3] != 1){
- printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3);
- sc->last_led_err[3] = 1;
-
- }
- else {
- if(sc->last_led_err[3] != 0){
- printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name);
- }
- if( ! (link_status & T1F_RLOS))
- lmc_led_off(sc, LMC_DS3_LED3);
- sc->last_led_err[3] = 0;
- }
-
- if(link_status & T1F_RLOS){
- ret = 0;
- if(sc->last_led_err[2] != 1){
- printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3);
- sc->last_led_err[2] = 1;
-
- }
- else {
- if(sc->last_led_err[2] != 0){
- printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name);
- }
- if( ! (link_status & T1F_RLOF))
- lmc_led_off(sc, LMC_DS3_LED3);
- sc->last_led_err[2] = 0;
- }
-
- sc->lmc_xinfo.t1_alarm1_status = link_status;
-
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
- sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
-
- return ret;
-}
-
-/*
- * 1 == T1 Circuit Type , 0 == E1 Circuit Type
- */
-static void
-lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_CIRCUIT_TYPE_T1) {
- sc->lmc_miireg16 |= LMC_MII16_T1_Z;
- sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1;
- printk(KERN_INFO "%s: In T1 Mode\n", sc->name);
- }
- else {
- sc->lmc_miireg16 &= ~LMC_MII16_T1_Z;
- sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1;
- printk(KERN_INFO "%s: In E1 Mode\n", sc->name);
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_t1_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_T1_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
-
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_t1_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = ie;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-static void
-lmc_t1_watchdog (lmc_softc_t * const sc)
-{
-}
-
-static void
-lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (!ctl)
- sc->ictl.keepalive_onoff = LMC_CTL_ON;
-}
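
Each card variant exports exactly one of the lmc_media_t tables defined at the top of this file (lmc_ds3_media, lmc_hssi_media, lmc_ssi_media, lmc_t1_media), and the core driver only ever reaches the hardware through sc->lmc_media, which keeps the per-media differences contained here. A hedged sketch of how a caller might drive the table; the helper name and exact call order are illustrative, not taken from the driver:

/* Illustrative sketch only, assuming the lmc_softc_t / lmc_ctl_t types
 * and the LMC_LINK_* constants from lmc_var.h.
 */
static void lmc_apply_ctl(lmc_softc_t * const sc, lmc_ctl_t *ctl)
{
        sc->lmc_media->set_status(sc, ctl);          /* media-specific setup */

        if (sc->lmc_media->get_link_status(sc))      /* poll the line state  */
                sc->lmc_media->set_link_status(sc, LMC_LINK_UP);
        else
                sc->lmc_media->set_link_status(sc, LMC_LINK_DOWN);
}
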
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
deleted file mode 100644
index e5487616a816..000000000000
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- *
- * With Help By:
- * David Boggs
- * Ron Crane
- * Allan Cox
- *
- * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/workqueue.h>
-#include <linux/proc_fs.h>
-#include <linux/bitops.h>
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/smp.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_debug.h"
-#include "lmc_ioctl.h"
-#include "lmc_proto.h"
-
-// attach
-void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
-{
- if (sc->if_type == LMC_NET) {
- struct net_device *dev = sc->lmc_device;
- /*
- * Set a few basics because this mode doesn't use HDLC
- */
- dev->flags |= IFF_POINTOPOINT;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- }
-}
-
-int lmc_proto_open(lmc_softc_t *sc)
-{
- int ret = 0;
-
- if (sc->if_type == LMC_PPP) {
- ret = hdlc_open(sc->lmc_device);
- if (ret < 0)
- printk(KERN_WARNING "%s: HDLC open failed: %d\n",
- sc->name, ret);
- }
- return ret;
-}
-
-void lmc_proto_close(lmc_softc_t *sc)
-{
- if (sc->if_type == LMC_PPP)
- hdlc_close(sc->lmc_device);
-}
-
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
- switch(sc->if_type){
- case LMC_PPP:
- return hdlc_type_trans(skb, sc->lmc_device);
- case LMC_NET:
- return htons(ETH_P_802_2);
- case LMC_RAW: /* packet type for the skbuff is kind of useless here */
- return htons(ETH_P_802_2);
- default:
- printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
- return htons(ETH_P_802_2);
- }
-}
-
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
- switch(sc->if_type){
- case LMC_PPP:
- case LMC_NET:
- default:
- netif_rx(skb);
- break;
- case LMC_RAW:
- break;
- }
-}
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
deleted file mode 100644
index e56e7072de44..000000000000
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_PROTO_H_
-#define _LMC_PROTO_H_
-
-#include <linux/hdlc.h>
-
-void lmc_proto_attach(lmc_softc_t *sc);
-int lmc_proto_open(lmc_softc_t *sc);
-void lmc_proto_close(lmc_softc_t *sc);
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
-
-static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
-{
- return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
-}
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
deleted file mode 100644
index 99f0aa787a35..000000000000
--- a/drivers/net/wan/lmc/lmc_var.h
+++ /dev/null
@@ -1,468 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_VAR_H_
-#define _LMC_VAR_H_
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-#include <linux/timer.h>
-
-/*
- * basic definitions used in lmc include files
- */
-
-typedef struct lmc___softc lmc_softc_t;
-typedef struct lmc___media lmc_media_t;
-typedef struct lmc___ctl lmc_ctl_t;
-
-#define lmc_csrptr_t unsigned long
-
-#define LMC_REG_RANGE 0x80
-
-#define LMC_PRINTF_FMT "%s"
-#define LMC_PRINTF_ARGS (sc->lmc_device->name)
-
-#define TX_TIMEOUT (2*HZ)
-
-#define LMC_TXDESCS 32
-#define LMC_RXDESCS 32
-
-#define LMC_LINK_UP 1
-#define LMC_LINK_DOWN 0
-
-/* These macros for generic read and write to and from the dec chip */
-#define LMC_CSR_READ(sc, csr) \
- inl((sc)->lmc_csrs.csr)
-#define LMC_CSR_WRITE(sc, reg, val) \
- outl((val), (sc)->lmc_csrs.reg)
-
-//#ifdef _LINUX_DELAY_H
-// #define SLOW_DOWN_IO udelay(2);
-// #undef __SLOW_DOWN_IO
-// #define __SLOW_DOWN_IO udelay(2);
-//#endif
-
-#define DELAY(n) SLOW_DOWN_IO
-
-#define lmc_delay() inl(sc->lmc_csrs.csr_9)
-
-/* This macro sync's up with the mii so that reads and writes can take place */
-#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \
- LMC_CSR_WRITE((sc), csr_9, 0x20000); \
- lmc_delay(); \
- LMC_CSR_WRITE((sc), csr_9, 0x30000); \
- lmc_delay(); \
- n--; }} while(0)
-
-struct lmc_regfile_t {
- lmc_csrptr_t csr_busmode; /* CSR0 */
- lmc_csrptr_t csr_txpoll; /* CSR1 */
- lmc_csrptr_t csr_rxpoll; /* CSR2 */
- lmc_csrptr_t csr_rxlist; /* CSR3 */
- lmc_csrptr_t csr_txlist; /* CSR4 */
- lmc_csrptr_t csr_status; /* CSR5 */
- lmc_csrptr_t csr_command; /* CSR6 */
- lmc_csrptr_t csr_intr; /* CSR7 */
- lmc_csrptr_t csr_missed_frames; /* CSR8 */
- lmc_csrptr_t csr_9; /* CSR9 */
- lmc_csrptr_t csr_10; /* CSR10 */
- lmc_csrptr_t csr_11; /* CSR11 */
- lmc_csrptr_t csr_12; /* CSR12 */
- lmc_csrptr_t csr_13; /* CSR13 */
- lmc_csrptr_t csr_14; /* CSR14 */
- lmc_csrptr_t csr_15; /* CSR15 */
-};
-
-#define csr_enetrom csr_9 /* 21040 */
-#define csr_reserved csr_10 /* 21040 */
-#define csr_full_duplex csr_11 /* 21040 */
-#define csr_bootrom csr_10 /* 21041/21140A/?? */
-#define csr_gp csr_12 /* 21140* */
-#define csr_watchdog csr_15 /* 21140* */
-#define csr_gp_timer csr_11 /* 21041/21140* */
-#define csr_srom_mii csr_9 /* 21041/21140* */
-#define csr_sia_status csr_12 /* 2104x */
-#define csr_sia_connectivity csr_13 /* 2104x */
-#define csr_sia_tx_rx csr_14 /* 2104x */
-#define csr_sia_general csr_15 /* 2104x */
-
-/* tulip length/control transmit descriptor definitions
- * used to define bits in the second tulip_desc_t field (length)
- * for the transmit descriptor -baz */
-
-#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF))
-#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800))
-#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000))
-#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000))
-#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000))
-#define LMC_TDES_END_OF_RING ((u32)(0x02000000))
-#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000))
-#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000))
-#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000))
-#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000))
-#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000))
-#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
-
-#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
-#define TDES_COLLISION_COUNT_BIT_NUMBER 3
-
-/* Constants for the RCV descriptor RDES */
-
-#define LMC_RDES_OVERFLOW ((u32)(0x00000001))
-#define LMC_RDES_CRC_ERROR ((u32)(0x00000002))
-#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004))
-#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008))
-#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
-#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020))
-#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040))
-#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080))
-#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100))
-#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200))
-#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400))
-#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800))
-#define LMC_RDES_DATA_TYPE ((u32)(0x00003000))
-#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000))
-#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000))
-#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000))
-#define LMC_RDES_OWN_BIT ((u32)(0x80000000))
-
-#define RDES_FRAME_LENGTH_BIT_NUMBER 16
-
-#define LMC_RDES_ERROR_MASK ( (u32)( \
- LMC_RDES_OVERFLOW \
- | LMC_RDES_DRIBBLING_BIT \
- | LMC_RDES_REPORT_ON_MII_ERR \
- | LMC_RDES_COLLISION_SEEN ) )
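
As the RDES constants above suggest, the received frame length sits in bits 16..29 of the descriptor status word. A standalone one-liner showing the mask-and-shift the receive path would use:

/* Standalone example of LMC_RDES_FRAME_LENGTH / RDES_FRAME_LENGTH_BIT_NUMBER. */
#include <stdint.h>

#define LMC_RDES_FRAME_LENGTH        0x3FFF0000u
#define RDES_FRAME_LENGTH_BIT_NUMBER 16

static inline unsigned rdes_frame_len(uint32_t status)
{
        return (status & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER;
}
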
-
-
-/*
- * Ioctl info
- */
-
-typedef struct {
- u32 n;
- u32 m;
- u32 v;
- u32 x;
- u32 r;
- u32 f;
- u32 exact;
-} lmc_av9110_t;
-
-/*
- * Common structure passed to the ioctl code.
- */
-struct lmc___ctl {
- u32 cardtype;
- u32 clock_source; /* HSSI, T1 */
- u32 clock_rate; /* T1 */
- u32 crc_length;
- u32 cable_length; /* DS3 */
- u32 scrambler_onoff; /* DS3 */
- u32 cable_type; /* T1 */
- u32 keepalive_onoff; /* protocol */
- u32 ticks; /* ticks/sec */
- union {
- lmc_av9110_t ssi;
- } cardspec;
- u32 circuit_type; /* T1 or E1 */
-};
-
-
-/*
- * Careful, look at the data sheet, there's more to this
- * structure than meets the eye. It should probably be:
- *
- * struct tulip_desc_t {
- * u8 own:1;
- * u32 status:31;
- * u32 control:10;
- * u32 buffer1;
- * u32 buffer2;
- * };
- * You could also expand status control to provide more bit information
- */
-
-struct tulip_desc_t {
- s32 status;
- s32 length;
- u32 buffer1;
- u32 buffer2;
-};
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-struct lmc___media {
- void (* init)(lmc_softc_t * const);
- void (* defaults)(lmc_softc_t * const);
- void (* set_status)(lmc_softc_t * const, lmc_ctl_t *);
- void (* set_clock_source)(lmc_softc_t * const, int);
- void (* set_speed)(lmc_softc_t * const, lmc_ctl_t *);
- void (* set_cable_length)(lmc_softc_t * const, int);
- void (* set_scrambler)(lmc_softc_t * const, int);
- int (* get_link_status)(lmc_softc_t * const);
- void (* set_link_status)(lmc_softc_t * const, int);
- void (* set_crc_length)(lmc_softc_t * const, int);
- void (* set_circuit_type)(lmc_softc_t * const, int);
- void (* watchdog)(lmc_softc_t * const);
-};
-
-
-#define STATCHECK 0xBEEFCAFE
-
-struct lmc_extra_statistics
-{
- u32 version_size;
- u32 lmc_cardtype;
-
- u32 tx_ProcTimeout;
- u32 tx_IntTimeout;
- u32 tx_NoCompleteCnt;
- u32 tx_MaxXmtsB4Int;
- u32 tx_TimeoutCnt;
- u32 tx_OutOfSyncPtr;
- u32 tx_tbusy0;
- u32 tx_tbusy1;
- u32 tx_tbusy_calls;
- u32 resetCount;
- u32 lmc_txfull;
- u32 tbusy;
- u32 dirtyTx;
- u32 lmc_next_tx;
- u32 otherTypeCnt;
- u32 lastType;
- u32 lastTypeOK;
- u32 txLoopCnt;
- u32 usedXmtDescripCnt;
- u32 txIndexCnt;
- u32 rxIntLoopCnt;
-
- u32 rx_SmallPktCnt;
- u32 rx_BadPktSurgeCnt;
- u32 rx_BuffAllocErr;
- u32 tx_lossOfClockCnt;
-
- /* T1 error counters */
- u32 framingBitErrorCount;
- u32 lineCodeViolationCount;
-
- u32 lossOfFrameCount;
- u32 changeOfFrameAlignmentCount;
- u32 severelyErroredFrameCount;
-
- u32 check;
-};
-
-typedef struct lmc_xinfo {
- u32 Magic0; /* BEEFCAFE */
-
- u32 PciCardType;
- u32 PciSlotNumber; /* PCI slot number */
-
- u16 DriverMajorVersion;
- u16 DriverMinorVersion;
- u16 DriverSubVersion;
-
- u16 XilinxRevisionNumber;
- u16 MaxFrameSize;
-
- u16 t1_alarm1_status;
- u16 t1_alarm2_status;
-
- int link_status;
- u32 mii_reg16;
-
- u32 Magic1; /* DEADBEEF */
-} LMC_XINFO;
-
-
-/*
- * forward decl
- */
-struct lmc___softc {
- char *name;
- u8 board_idx;
- struct lmc_extra_statistics extra_stats;
- struct net_device *lmc_device;
-
- int hang, rxdesc, bad_packet, some_counter;
- u32 txgo;
- struct lmc_regfile_t lmc_csrs;
- volatile u32 lmc_txtick;
- volatile u32 lmc_rxtick;
- u32 lmc_flags;
- u32 lmc_intrmask; /* our copy of csr_intr */
- u32 lmc_cmdmode; /* our copy of csr_cmdmode */
- u32 lmc_busmode; /* our copy of csr_busmode */
- u32 lmc_gpio_io; /* state of in/out settings */
- u32 lmc_gpio; /* state of outputs */
- struct sk_buff* lmc_txq[LMC_TXDESCS];
- struct sk_buff* lmc_rxq[LMC_RXDESCS];
- volatile
- struct tulip_desc_t lmc_rxring[LMC_RXDESCS];
- volatile
- struct tulip_desc_t lmc_txring[LMC_TXDESCS];
- unsigned int lmc_next_rx, lmc_next_tx;
- volatile
- unsigned int lmc_taint_tx, lmc_taint_rx;
- int lmc_tx_start, lmc_txfull;
- int lmc_txbusy;
- u16 lmc_miireg16;
- int lmc_ok;
- int last_link_status;
- int lmc_cardtype;
- u32 last_frameerr;
- lmc_media_t *lmc_media;
- struct timer_list timer;
- lmc_ctl_t ictl;
- u32 TxDescriptControlInit;
-
- int tx_TimeoutInd; /* additional driver state */
- int tx_TimeoutDisplay;
- unsigned int lastlmc_taint_tx;
- int lasttx_packets;
- u32 tx_clockState;
- u32 lmc_crcSize;
- LMC_XINFO lmc_xinfo;
- char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
- char lmc_timing; /* for HSSI and SSI */
- int got_irq;
-
- char last_led_err[4];
-
- u32 last_int;
- u32 num_int;
-
- spinlock_t lmc_lock;
- u16 if_type; /* HDLC/PPP or NET */
-
- /* Failure cases */
- u8 failed_ring;
- u8 failed_recv_alloc;
-
- /* Structure check */
- u32 check;
-};
-
-#define LMC_PCI_TIME 1
-#define LMC_EXT_TIME 0
-
-#define PKT_BUF_SZ 1542 /* was 1536 */
-
-/* CSR5 settings */
-#define TIMER_INT 0x00000800
-#define TP_LINK_FAIL 0x00001000
-#define TP_LINK_PASS 0x00000010
-#define NORMAL_INT 0x00010000
-#define ABNORMAL_INT 0x00008000
-#define RX_JABBER_INT 0x00000200
-#define RX_DIED 0x00000100
-#define RX_NOBUFF 0x00000080
-#define RX_INT 0x00000040
-#define TX_FIFO_UNDER 0x00000020
-#define TX_JABBER 0x00000008
-#define TX_NOBUFF 0x00000004
-#define TX_DIED 0x00000002
-#define TX_INT 0x00000001
-
-/* CSR6 settings */
-#define OPERATION_MODE 0x00000200 /* Full Duplex */
-#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */
-#define RECEIVE_ALL 0x40000000 /* Receive All */
-#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */
-
-/* Dec control registers CSR6 as well */
-#define LMC_DEC_ST 0x00002000
-#define LMC_DEC_SR 0x00000002
-
-/* CSR15 settings */
-#define RECV_WATCHDOG_DISABLE 0x00000010
-#define JABBER_DISABLE 0x00000001
-
-/* More settings */
-/*
- * CSR6 -- Command (Operation Mode) Register
- */
-#define TULIP_CMD_RECEIVEALL 0x40000000L /* (RW) Receive all frames? */
-#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */
-#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */
-#define TULIP_CMD_STOREFWD 0x00200000L /* (RW) Store and Forward (21140) */
-#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */
-#define TULIP_CMD_PORTSELECT 0x00040000L /* (RW) Port Select (100Mb) (21140) */
-#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */
-#define TULIP_CMD_OPERMODE 0x00000C00L /* (RW) Operating Mode */
-#define TULIP_CMD_PROMISCUOUS 0x00000041L /* (RW) Promiscuous Mode */
-#define TULIP_CMD_PASSBADPKT 0x00000008L /* (RW) Pass Bad Frames */
-#define TULIP_CMD_THRESHOLDCTL 0x0000C000L /* (RW) Threshold Control */
-
-#define TULIP_GP_PINSET 0x00000100L
-#define TULIP_BUSMODE_SWRESET 0x00000001L
-#define TULIP_WATCHDOG_TXDISABLE 0x00000001L
-#define TULIP_WATCHDOG_RXDISABLE 0x00000010L
-
-#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */
-#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */
-#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */
-#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */
-#define TULIP_STS_GTE 0x00000800L /* (RW) General Purpose Timer Exp */
-#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */
-#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */
-#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */
-#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */
-#define TULIP_STS_RXINTR 0x00000040L /* (RW) Receive Interrupt */
-#define TULIP_STS_TXUNDERFLOW 0x00000020L /* (RW) Transmit Underflow */
-#define TULIP_STS_TXJABER 0x00000008L /* (RW) Jabber timeout */
-#define TULIP_STS_TXNOBUF 0x00000004L
-#define TULIP_STS_TXSTOPPED 0x00000002L /* (RW) Transmit Process Stopped */
-#define TULIP_STS_TXINTR 0x00000001L /* (RW) Transmit Interrupt */
-
-#define TULIP_STS_RXS_STOPPED 0x00000000L /* 000 - Stopped */
-
-#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receive Process Stopped */
-#define TULIP_STS_RXNOBUF 0x00000080L
-
-#define TULIP_CMD_TXRUN 0x00002000L /* (RW) Start/Stop Transmitter */
-#define TULIP_CMD_RXRUN 0x00000002L /* (RW) Start/Stop Receive Filtering */
-#define TULIP_DSTS_TxDEFERRED 0x00000001 /* Initially Deferred */
-#define TULIP_DSTS_OWNER 0x80000000 /* Owner (1 = 21040) */
-#define TULIP_DSTS_RxMIIERR 0x00000008
-#define LMC_DSTS_ERRSUM (TULIP_DSTS_RxMIIERR)
-
-#define TULIP_DEFAULT_INTR_MASK (TULIP_STS_NORMALINTR \
- | TULIP_STS_RXINTR \
- | TULIP_STS_TXINTR \
- | TULIP_STS_ABNRMLINTR \
- | TULIP_STS_SYSERROR \
- | TULIP_STS_TXSTOPPED \
- | TULIP_STS_TXUNDERFLOW\
- | TULIP_STS_RXSTOPPED )
-
-#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000))
-#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000))
-
-#ifndef TULIP_CMD_RECEIVEALL
-#define TULIP_CMD_RECEIVEALL 0x40000000L
-#endif
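
The CSR5 bits and TULIP_DEFAULT_INTR_MASK above describe the Tulip-style interrupt status register the LMC driver used. As a minimal sketch, assuming the definitions in this header and kernel integer types, this is roughly how such status bits are decoded; the helper below is illustrative only and was never part of the driver. On Tulip-class parts the status bits are write-one-to-clear, so the handled mask doubles as the acknowledge value.

/* Illustrative only: decode a raw CSR5 value against the default mask */
static inline u32 example_csr5_events(u32 csr5)
{
	u32 handled = csr5 & TULIP_DEFAULT_INTR_MASK;

	if (handled & TULIP_STS_RXINTR) {
		/* service the receive ring */
	}
	if (handled & TULIP_STS_TXINTR) {
		/* reclaim completed transmit descriptors */
	}
	if (handled & TULIP_STS_ABNRMLINTR) {
		/* error path: underflow, stopped engines, and so on */
	}

	/* Write-one-to-clear: the return value is also the ack to write back */
	return handled;
}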
-
-/* Adapter module number */
-#define LMC_ADAP_HSSI 2
-#define LMC_ADAP_DS3 3
-#define LMC_ADAP_SSI 4
-#define LMC_ADAP_T1 5
-
-#define LMC_MTU 1500
-
-#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
-#define LMC_CRC_LEN_32 4
-
-#endif /* _LMC_VAR_H_ */
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
deleted file mode 100644
index eddd20aab691..000000000000
--- a/drivers/net/wan/sealevel.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Sealevel Systems 4021 driver.
- *
- * (c) Copyright 1999, 2001 Alan Cox
- * (c) Copyright 2001 Red Hat Inc.
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-struct slvl_device {
- struct z8530_channel *chan;
- int channel;
-};
-
-struct slvl_board {
- struct slvl_device dev[2];
- struct z8530_dev board;
- int iobase;
-};
-
- /* Network driver support routines */
-
-static inline struct slvl_device *dev_to_chan(struct net_device *dev)
-{
- return (struct slvl_device *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- netif_rx(skb);
-}
-
- /* We've been placed in the UP state */
-
-static int sealevel_open(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int err = -1;
- int unit = slvl->channel;
-
- /* Link layer up. */
-
- switch (unit) {
- case 0:
- err = z8530_sync_dma_open(d, slvl->chan);
- break;
- case 1:
- err = z8530_sync_open(d, slvl->chan);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return err;
- }
-
- slvl->chan->rx_function = sealevel_input;
-
- netif_start_queue(d);
- return 0;
-}
-
-static int sealevel_close(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int unit = slvl->channel;
-
- /* Discard new frames */
-
- slvl->chan->rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind. */
-
-static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
-}
-
-static int sealevel_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static const struct net_device_ops sealevel_ops = {
- .ndo_open = sealevel_open,
- .ndo_stop = sealevel_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
-{
- struct net_device *dev = alloc_hdlcdev(sv);
-
- if (!dev)
- return -1;
-
- dev_to_hdlc(dev)->attach = sealevel_attach;
- dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
- dev->netdev_ops = &sealevel_ops;
- dev->base_addr = iobase;
- dev->irq = irq;
-
- if (register_hdlc_device(dev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(dev);
- return -1;
- }
-
- sv->chan->netdevice = dev;
- return 0;
-}
-
-/* Allocate and set up a Sealevel board. */
-
-static __init struct slvl_board *slvl_init(int iobase, int irq,
- int txdma, int rxdma, int slow)
-{
- struct z8530_dev *dev;
- struct slvl_board *b;
-
- /* Get the needed I/O space */
-
- if (!request_region(iobase, 8, "Sealevel 4021")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
- if (!b)
- goto err_kzalloc;
-
- b->dev[0].chan = &b->board.chanA;
- b->dev[0].channel = 0;
-
- b->dev[1].chan = &b->board.chanB;
- b->dev[1].channel = 1;
-
- dev = &b->board;
-
- /* Stuff in the I/O addressing */
-
- dev->active = 0;
-
- b->iobase = iobase;
-
- /* Select 8530 delays for the old board */
-
- if (slow)
- iobase |= Z8530_PORT_SLEEP;
-
- dev->chanA.ctrlio = iobase + 1;
- dev->chanA.dataio = iobase;
- dev->chanB.ctrlio = iobase + 3;
- dev->chanB.dataio = iobase + 2;
-
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
-
- /* Assert DTR, enable DMA */
-
- outb(3 | (1 << 7), b->iobase + 4);
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "SeaLevel", dev) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_request_irq;
- }
-
- dev->irq = irq;
- dev->chanA.private = &b->dev[0];
- dev->chanB.private = &b->dev[1];
- dev->chanA.dev = dev;
- dev->chanB.dev = dev;
-
- dev->chanA.txdma = 3;
- dev->chanA.rxdma = 1;
- if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
- goto err_dma_tx;
-
- if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
- goto err_dma_rx;
-
- disable_irq(irq);
-
- /* Begin normal initialisation */
-
- if (z8530_init(dev) != 0) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_hw;
- }
- if (dev->type == Z85C30) {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
- } else {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
- }
-
- /* Now we can take the IRQ */
-
- enable_irq(irq);
-
- if (slvl_setup(&b->dev[0], iobase, irq))
- goto free_hw;
- if (slvl_setup(&b->dev[1], iobase, irq))
- goto free_netdev0;
-
- z8530_describe(dev, "I/O", iobase);
- dev->active = 1;
- return b;
-
-free_netdev0:
- unregister_hdlc_device(b->dev[0].chan->netdevice);
- free_netdev(b->dev[0].chan->netdevice);
-free_hw:
- free_dma(dev->chanA.rxdma);
-err_dma_rx:
- free_dma(dev->chanA.txdma);
-err_dma_tx:
- free_irq(irq, dev);
-err_request_irq:
- kfree(b);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void __exit slvl_shutdown(struct slvl_board *b)
-{
- int u;
-
- z8530_shutdown(&b->board);
-
- for (u = 0; u < 2; u++) {
- struct net_device *d = b->dev[u].chan->netdevice;
-
- unregister_hdlc_device(d);
- free_netdev(d);
- }
-
- free_irq(b->board.irq, &b->board);
- free_dma(b->board.chanA.rxdma);
- free_dma(b->board.chanA.txdma);
- /* DMA off on the card, drop DTR */
- outb(0, b->iobase);
- release_region(b->iobase, 8);
- kfree(b);
-}
-
-static int io = 0x238;
-static int txdma = 1;
-static int rxdma = 3;
-static int irq = 5;
-static bool slow;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
-module_param_hw(txdma, int, dma, 0);
-MODULE_PARM_DESC(txdma, "Transmit DMA channel");
-module_param_hw(rxdma, int, dma, 0);
-MODULE_PARM_DESC(rxdma, "Receive DMA channel");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
-module_param(slow, bool, 0);
-MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
-
-static struct slvl_board *slvl_unit;
-
-static int __init slvl_init_module(void)
-{
- slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
-
- return slvl_unit ? 0 : -ENODEV;
-}
-
-static void __exit slvl_cleanup_module(void)
-{
- if (slvl_unit)
- slvl_shutdown(slvl_unit);
-}
-
-module_init(slvl_init_module);
-module_exit(slvl_cleanup_module);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
deleted file mode 100644
index 982a03488a00..000000000000
--- a/drivers/net/wan/z85230.c
+++ /dev/null
@@ -1,1641 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- * (c) Copyright 2000, 2001 Red Hat Inc
- *
- * Development of this driver was funded by Equiinet Ltd
- * http://www.equiinet.com
- *
- * ChangeLog:
- *
- * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
- * unification of all the Z85x30 asynchronous drivers for real.
- *
- * DMA now uses get_free_page as kmalloc buffers may span a 64K
- * boundary.
- *
- * Modified for SMP safety and SMP locking by Alan Cox
- * <alan@lxorguk.ukuu.org.uk>
- *
- * Performance
- *
- * Z85230:
- * Without DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
- * X.25 is not unrealistic on all machines. DMA mode can in theory
- * handle T1/E1 quite nicely. In practice the limit seems to be about
- * 512Kbit->1Mbit depending on motherboard.
- *
- * Z85C30:
- * 64K will take DMA, 9600 baud X.25 should be ok.
- *
- * Z8530:
- * Synchronous mode without DMA is unlikely to get past about 2400 baud.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#define RT_LOCK
-#define RT_UNLOCK
-#include <linux/spinlock.h>
-
-#include "z85230.h"
-
-/**
- * z8530_read_port - Architecture specific interface function
- * @p: port to read
- *
- * Provides port access methods. The Comtrol SV11 requires no delays
- * between accesses and uses PC I/O. Some drivers may need a 5uS delay
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dread 5uS sanity delay.
- *
- * The caller must hold sufficient locks to avoid violating the horrible
- * 5uS delay rule.
- */
-
-static inline int z8530_read_port(unsigned long p)
-{
- u8 r = inb(Z8530_PORT_OF(p));
-
- if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
- udelay(5);
- return r;
-}
-
-/**
- * z8530_write_port - Architecture specific interface function
- * @p: port to write
- * @d: value to write
- *
- * Write a value to a port with delays if need be. Note that the
- * caller must hold locks to avoid read/writes from other contexts
- * violating the 5uS rule
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dread 5uS sanity delay.
- */
-
-static inline void z8530_write_port(unsigned long p, u8 d)
-{
- outb(d, Z8530_PORT_OF(p));
- if (p & Z8530_PORT_SLEEP)
- udelay(5);
-}
-
-static void z8530_rx_done(struct z8530_channel *c);
-static void z8530_tx_done(struct z8530_channel *c);
-
-/**
- * read_zsreg - Read a register from a Z85230
- * @c: Z8530 channel to read from (2 per chip)
- * @reg: Register to read
- * FIXME: Use a spinlock.
- *
- * Most of the Z8530 registers are indexed off the control registers.
- * A read is done by writing to the control register and reading the
- * register back. The caller must hold the lock
- */
-
-static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- return z8530_read_port(c->ctrlio);
-}
-
-/**
- * read_zsdata - Read the data port of a Z8530 channel
- * @c: The Z8530 channel to read the data port from
- *
- * The data port provides fast access to some things. We still
- * have all the 5uS delays to worry about.
- */
-
-static inline u8 read_zsdata(struct z8530_channel *c)
-{
- u8 r;
-
- r = z8530_read_port(c->dataio);
- return r;
-}
-
-/**
- * write_zsreg - Write to a Z8530 channel register
- * @c: The Z8530 channel
- * @reg: Register number
- * @val: Value to write
- *
- * Write a value to an indexed register. The caller must hold the lock
- * to honour the irritating delay rules. We know about register 0
- * being fast to access.
- *
- * Assumes c->lock is held.
- */
-static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- z8530_write_port(c->ctrlio, val);
-}
-
-/**
- * write_zsctrl - Write to a Z8530 control register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the control register on the Z8530
- */
-
-static inline void write_zsctrl(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->ctrlio, val);
-}
-
-/**
- * write_zsdata - Write to a Z8530 data register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the data register on the Z8530
- */
-static inline void write_zsdata(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->dataio, val);
-}
-
-/* Register loading parameters for a dead port
- */
-
-u8 z8530_dead_port[] = {
- 255
-};
-EXPORT_SYMBOL(z8530_dead_port);
-
-/* Register loading parameters for currently supported circuit types
- */
-
-/* Data clocked by telco end. This is the correct data for the UK
- * "kilostream" service, and most other similar services.
- */
-
-u8 z8530_hdlc_kilostream[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream);
-
-/* As above but for enhanced chips.
- */
-
-u8 z8530_hdlc_kilostream_85230[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 23, 3, /* Extended mode AUTO TX and EOM*/
-
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
-
-/**
- * z8530_flush_fifo - Flush on chip RX FIFO
- * @c: Channel to flush
- *
- * Flush the receive FIFO. There is no specific option for this, we
- * blindly read bytes and discard them. Reading when there is no data
- * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
- *
- * All locking is handled for the caller. On return data may still be
- * present if it arrived during the flush.
- */
-
-static void z8530_flush_fifo(struct z8530_channel *c)
-{
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- if (c->dev->type == Z85230) {
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- }
-}
-
-/**
- * z8530_rtsdtr - Control the outgoing DTR/RTS line
- * @c: The Z8530 channel to control
- * @set: 1 to set, 0 to clear
- *
- * Sets or clears DTR/RTS on the requested line. All locking is handled
- * by the caller. For now we assume all boards use the actual RTS/DTR
- * on the chip. Apparently one or two don't. We'll scream about them
- * later.
- */
-
-static void z8530_rtsdtr(struct z8530_channel *c, int set)
-{
- if (set)
- c->regs[5] |= (RTS | DTR);
- else
- c->regs[5] &= ~(RTS | DTR);
- write_zsreg(c, R5, c->regs[5]);
-}
-
-/**
- * z8530_rx - Handle a PIO receive event
- * @c: Z8530 channel to process
- *
- * Receive handler for receiving in PIO mode. This is much like the
- * async one but not quite the same or as complex
- *
- * Note: It's intended that this handler can easily be separated from
- * the main code to run realtime. That'll be needed for some machines
- * (eg to ever clock 64kbits on a sparc ;)).
- *
- * The RT_LOCK macros don't do anything now. Keep the code covered
- * by them as short as possible in all circumstances - clocks cost
- * baud. The interrupt handler is assumed to be atomic w.r.t.
- * other code - this is true in the RT case too.
- *
- * We only cover the sync cases for this. If you want 2Mbit async
- * do it yourself but consider medical assistance first. This non DMA
- * synchronous mode is portable code. The DMA mode assumes PCI like
- * ISA DMA
- *
- * Called with the device lock held
- */
-
-static void z8530_rx(struct z8530_channel *c)
-{
- u8 ch, stat;
-
- while (1) {
- /* FIFO empty ? */
- if (!(read_zsreg(c, R0) & 1))
- break;
- ch = read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- /* Overrun ?
- */
- if (c->count < c->max) {
- *c->dptr++ = ch;
- c->count++;
- }
-
- if (stat & END_FR) {
- /* Error ?
- */
- if (stat & (Rx_OVR | CRC_ERR)) {
- /* Rewind the buffer and return */
- if (c->skb)
- c->dptr = c->skb->data;
- c->count = 0;
- if (stat & Rx_OVR) {
- pr_warn("%s: overrun\n", c->dev->name);
- c->rx_overrun++;
- }
- if (stat & CRC_ERR) {
- c->rx_crc_err++;
- /* printk("crc error\n"); */
- }
- /* Shove the frame upstream */
- } else {
- /* Drop the lock for RX processing, or
- * there are deadlocks
- */
- z8530_rx_done(c);
- write_zsctrl(c, RES_Rx_CRC);
- }
- }
- }
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx - Handle a PIO transmit event
- * @c: Z8530 channel to process
- *
- * Z8530 transmit interrupt handler for the PIO mode. The basic
- * idea is to attempt to keep the FIFO fed. We fill in as many bytes
- * as possible; otherwise it's quite possible that we won't keep up
- * with the data rate.
- */
-
-static void z8530_tx(struct z8530_channel *c)
-{
- while (c->txcount) {
- /* FIFO full ? */
- if (!(read_zsreg(c, R0) & 4))
- return;
- c->txcount--;
- /* Shovel out the byte
- */
- write_zsreg(c, R8, *c->tx_ptr++);
- write_zsctrl(c, RES_H_IUS);
- /* We are about to underflow */
- if (c->txcount == 0) {
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- }
- }
-
- /* End of frame TX - fire another one
- */
-
- write_zsctrl(c, RES_Tx_P);
-
- z8530_tx_done(c);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status - Handle a PIO status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred in PIO synchronous mode. There are several
- * reasons the chip will bother us here. A transmit underrun means we
- * failed to feed the chip fast enough and just broke a packet. A DCD
- * change is a line up or down.
- */
-
-static void z8530_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (status & TxEOM) {
-/* printk("%s: Tx underrun.\n", chan->dev->name); */
- chan->netdevice->stats.tx_fifo_errors++;
- write_zsctrl(chan, ERR_RES);
- z8530_tx_done(chan);
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_sync = {
- .rx = z8530_rx,
- .tx = z8530_tx,
- .status = z8530_status,
-};
-EXPORT_SYMBOL(z8530_sync);
-
-/**
- * z8530_dma_rx - Handle a DMA RX event
- * @chan: Channel to handle
- *
- * Non bus mastering DMA interfaces for the Z8x30 devices. This
- * is really pretty PC specific. The DMA mode means that most receive
- * events are handled by the DMA hardware. We get a kick here only if
- * a frame ended.
- */
-
-static void z8530_dma_rx(struct z8530_channel *chan)
-{
- if (chan->rxdma_on) {
- /* Special condition check only */
- u8 status;
-
- read_zsreg(chan, R7);
- read_zsreg(chan, R6);
-
- status = read_zsreg(chan, R1);
-
- if (status & END_FR)
- z8530_rx_done(chan); /* Fire up the next one */
-
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_H_IUS);
- } else {
- /* DMA is off right now, drain the slow way */
- z8530_rx(chan);
- }
-}
-
-/**
- * z8530_dma_tx - Handle a DMA TX event
- * @chan: The Z8530 channel to handle
- *
- * We have received an interrupt while doing DMA transmissions. It
- * shouldn't happen. Scream loudly if it does.
- */
-static void z8530_dma_tx(struct z8530_channel *chan)
-{
- if (!chan->dma_tx) {
- pr_warn("Hey who turned the DMA off?\n");
- z8530_tx(chan);
- return;
- }
- /* This shouldn't occur in DMA mode */
- pr_err("DMA tx - bogus event!\n");
- z8530_tx(chan);
-}
-
-/**
- * z8530_dma_status - Handle a DMA status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred on the Z8530. We receive these for two reasons
- * when in DMA mode. Firstly if we finished a packet transfer we get one
- * and kick the next packet out. Secondly we may see a DCD change.
- *
- */
-static void z8530_dma_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (chan->dma_tx) {
- if (status & TxEOM) {
- unsigned long flags;
-
- flags = claim_dma_lock();
- disable_dma(chan->txdma);
- clear_dma_ff(chan->txdma);
- chan->txdma_on = 0;
- release_dma_lock(flags);
- z8530_tx_done(chan);
- }
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
-
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-static struct z8530_irqhandler z8530_dma_sync = {
- .rx = z8530_dma_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-static struct z8530_irqhandler z8530_txdma_sync = {
- .rx = z8530_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-/**
- * z8530_rx_clear - Handle RX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_rx_clear(struct z8530_channel *c)
-{
- /* Data and status bytes
- */
- u8 stat;
-
- read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- if (stat & END_FR)
- write_zsctrl(c, RES_Rx_CRC);
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx_clear - Handle TX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_tx_clear(struct z8530_channel *c)
-{
- write_zsctrl(c, RES_Tx_P);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status_clear - Handle status events from a stopped chip
- * @chan: Z8530 channel to shut up
- *
- * Status interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_status_clear(struct z8530_channel *chan)
-{
- u8 status = read_zsreg(chan, R0);
-
- if (status & TxEOM)
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_nop = {
- .rx = z8530_rx_clear,
- .tx = z8530_tx_clear,
- .status = z8530_status_clear,
-};
-EXPORT_SYMBOL(z8530_nop);
-
-/**
- * z8530_interrupt - Handle an interrupt from a Z8530
- * @irq: Interrupt number
- * @dev_id: The Z8530 device that is interrupting.
- *
- * A Z85[2]30 device has stuck its hand in the air for attention.
- * We scan both the channels on the chip for events and then call
- * the channel specific call backs for each channel that has events.
- * We have to use callback functions because the two channels can be
- * in different modes.
- *
- * Locking is done for the handlers. Note that locking is done
- * at the chip level (the 5uS delay issue is per chip not per
- * channel). c->lock for both channels points to dev->lock
- */
-
-irqreturn_t z8530_interrupt(int irq, void *dev_id)
-{
- struct z8530_dev *dev = dev_id;
- u8 intr;
- static volatile int locker = 0;
- int work = 0;
- struct z8530_irqhandler *irqs;
-
- if (locker) {
- pr_err("IRQ re-enter\n");
- return IRQ_NONE;
- }
- locker = 1;
-
- spin_lock(&dev->lock);
-
- while (++work < 5000) {
- intr = read_zsreg(&dev->chanA, R3);
- if (!(intr &
- (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
- break;
-
- /* This holds the IRQ status. On the 8530 you must read it
- * from chan A even though it applies to the whole chip
- */
-
- /* Now walk the chip and see what it is wanting - remember, it
- * may be an IRQ for someone else
- */
-
- irqs = dev->chanA.irqs;
-
- if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
- if (intr & CHARxIP)
- irqs->rx(&dev->chanA);
- if (intr & CHATxIP)
- irqs->tx(&dev->chanA);
- if (intr & CHAEXT)
- irqs->status(&dev->chanA);
- }
-
- irqs = dev->chanB.irqs;
-
- if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
- if (intr & CHBRxIP)
- irqs->rx(&dev->chanB);
- if (intr & CHBTxIP)
- irqs->tx(&dev->chanB);
- if (intr & CHBEXT)
- irqs->status(&dev->chanB);
- }
- }
- spin_unlock(&dev->lock);
- if (work == 5000)
- pr_err("%s: interrupt jammed - abort(0x%X)!\n",
- dev->name, intr);
- /* Ok all done */
- locker = 0;
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(z8530_interrupt);
-
-static const u8 reg_init[16] = {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0x55, 0, 0, 0
-};
-
-/**
- * z8530_sync_open - Open a Z8530 channel for PIO
- * @dev: The network interface we are using
- * @c: The Z8530 channel to open in synchronous PIO mode
- *
- * Switch a Z8530 into synchronous mode without DMA assist. We
- * raise the RTS/DTR and commence network operation.
- */
-int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
- c->irqs = &z8530_sync;
-
- /* This loads the double buffer up */
- z8530_rx_done(c); /* Load the frame ring */
- z8530_rx_done(c); /* Load the backup frame */
- z8530_rtsdtr(c, 1);
- c->dma_tx = 0;
- c->regs[R1] |= TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_open);
-
-/**
- * z8530_sync_close - Close a PIO Z8530 channel
- * @dev: Network device to close
- * @c: Z8530 channel to disassociate and move to idle
- *
- * Close down a Z8530 interface and switch its interrupt handlers
- * to discard future events.
- */
-int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_close);
-
-/**
- * z8530_sync_dma_open - Open a Z8530 for DMA I/O
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA in both directions. Two
- * ISA DMA channels must be available for this to work. We assume ISA
- * DMA driven I/O and PC limits on access.
- */
-int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Load the DMA interfaces up
- */
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs 1500 mtu or less on wan links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->rx_buf[0])
- return -ENOBUFS;
- c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- return -ENOBUFS;
- }
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_used = 0;
- c->dma_tx = 1;
- c->dma_num = 0;
- c->dma_ready = 1;
-
- /* Enable DMA control mode
- */
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* TX DMA via DIR/REQ
- */
-
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* RX DMA via W/Req
- */
-
- c->regs[R1] |= WT_FN_RDYFN;
- c->regs[R1] |= WT_RDY_RT;
- c->regs[R1] |= INT_ERR_Rx;
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] |= WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* DMA interrupts
- */
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
- set_dma_count(c->rxdma, c->mtu);
- enable_dma(c->rxdma);
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 1;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_dma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_open);
-
-/**
- * z8530_sync_dma_close - Close down DMA I/O
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA mode synchronous interface. Halt the DMA, and
- * free the buffers.
- */
-int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- flags = claim_dma_lock();
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
-
- c->rxdma_on = 0;
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- release_dma_lock(flags);
-
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- spin_lock_irqsave(c->lock, flags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->rx_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- }
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_close);
-
-/**
- * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA transmission. One
- * ISA DMA channel must be available for this to work. The receive
- * side is run in PIO mode, but then it has the bigger FIFO.
- */
-
-int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- printk("Opening sync interface for TX-DMA\n");
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs 1500 mtu or less on wan links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0])
- return -ENOBUFS;
-
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* Load the PIO receive ring
- */
-
- z8530_rx_done(c);
- z8530_rx_done(c);
-
- /* Load the DMA interfaces up
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- c->tx_dma_used = 0;
- c->dma_num = 0;
- c->dma_ready = 1;
- c->dma_tx = 1;
-
- /* Enable DMA control mode
- */
-
- /* TX DMA via DIR/REQ
- */
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_txdma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_open);
-
-/**
- * z8530_sync_txdma_close - Close down a TX driven DMA channel
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
- * and free the buffers.
- */
-
-int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long dflags, cflags;
- u8 chk;
-
- spin_lock_irqsave(c->lock, cflags);
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- release_dma_lock(dflags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, cflags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_close);
-
-/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
- * it exists...
- */
-static const char * const z8530_type_name[] = {
- "Z8530",
- "Z85C30",
- "Z85230"
-};
-
-/**
- * z8530_describe - Uniformly describe a Z8530 port
- * @dev: Z8530 device to describe
- * @mapping: string holding mapping type (eg "I/O" or "Mem")
- * @io: the port value in question
- *
- * Describe a Z8530 in a standard format. We must pass the I/O as
- * the port offset isn't predictable. The main reason for this function
- * is to try and get a common format of report.
- */
-
-void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
-{
- pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
- dev->name,
- z8530_type_name[dev->type],
- mapping,
- Z8530_PORT_OF(io),
- dev->irq);
-}
-EXPORT_SYMBOL(z8530_describe);
-
-/* Locked operation part of the z8530 init code
- */
-static inline int do_z8530_init(struct z8530_dev *dev)
-{
- /* NOP the interrupt handlers first - we might get a
- * floating IRQ transition when we reset the chip
- */
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- dev->chanA.dcdcheck = DCD;
- dev->chanB.dcdcheck = DCD;
-
- /* Reset the chip */
- write_zsreg(&dev->chanA, R9, 0xC0);
- udelay(200);
- /* Now check it's valid */
- write_zsreg(&dev->chanA, R12, 0xAA);
- if (read_zsreg(&dev->chanA, R12) != 0xAA)
- return -ENODEV;
- write_zsreg(&dev->chanA, R12, 0x55);
- if (read_zsreg(&dev->chanA, R12) != 0x55)
- return -ENODEV;
-
- dev->type = Z8530;
-
- /* See the application note.
- */
-
- write_zsreg(&dev->chanA, R15, 0x01);
-
- /* If we can set the low bit of R15 then
- * the chip is enhanced.
- */
-
- if (read_zsreg(&dev->chanA, R15) == 0x01) {
- /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
- /* Put a char in the fifo */
- write_zsreg(&dev->chanA, R8, 0);
- if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
- dev->type = Z85230; /* Has a FIFO */
- else
- dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
- }
-
- /* The code assumes R7' and friends are
- * off. Use write_zsext() for these and keep
- * this bit clear.
- */
-
- write_zsreg(&dev->chanA, R15, 0);
-
- /* At this point it looks like the chip is behaving
- */
-
- memcpy(dev->chanA.regs, reg_init, 16);
- memcpy(dev->chanB.regs, reg_init, 16);
-
- return 0;
-}
-
-/**
- * z8530_init - Initialise a Z8530 device
- * @dev: Z8530 device to initialise.
- *
- * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
- * is present, identify the type and then program it to hopefully
- * keep quiet and behave. This matters a lot; a Z8530 in the wrong
- * state will sometimes get into stupid modes generating 10kHz
- * interrupt streams and the like.
- *
- * We set the interrupt handler up to discard any events, in case
- * we get them during reset or setup.
- *
- * Return 0 for success, or a negative value indicating the problem
- * in errno form.
- */
-
-int z8530_init(struct z8530_dev *dev)
-{
- unsigned long flags;
- int ret;
-
- /* Set up the chip level lock */
- spin_lock_init(&dev->lock);
- dev->chanA.lock = &dev->lock;
- dev->chanB.lock = &dev->lock;
-
- spin_lock_irqsave(&dev->lock, flags);
- ret = do_z8530_init(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(z8530_init);
-
-/**
- * z8530_shutdown - Shutdown a Z8530 device
- * @dev: The Z8530 chip to shutdown
- *
- * We set the interrupt handlers to silence any interrupts. We then
- * reset the chip and wait 100uS to be sure the reset completed. Just
- * in case the caller then tries to do stuff.
- *
- * This is called without the lock held
- */
-int z8530_shutdown(struct z8530_dev *dev)
-{
- unsigned long flags;
- /* Reset the chip */
-
- spin_lock_irqsave(&dev->lock, flags);
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- write_zsreg(&dev->chanA, R9, 0xC0);
- /* We must lock the udelay, the chip is offlimits here */
- udelay(100);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_shutdown);
-
-/**
- * z8530_channel_load - Load channel data
- * @c: Z8530 channel to configure
- * @rtable: table of register, value pairs
- * FIXME: ioctl to allow user uploaded tables
- *
- * Load a Z8530 channel up from the system data. We use +16 to
- * indicate the "prime" registers. The value 255 terminates the
- * table.
- */
-
-int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- while (*rtable != 255) {
- int reg = *rtable++;
-
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] | 1);
- write_zsreg(c, reg & 0x0F, *rtable);
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] & ~1);
- c->regs[reg] = *rtable++;
- }
- c->rx_function = z8530_null_rx;
- c->skb = NULL;
- c->tx_skb = NULL;
- c->tx_next_skb = NULL;
- c->mtu = 1500;
- c->max = 0;
- c->count = 0;
- c->status = read_zsreg(c, R0);
- c->sync = 1;
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_channel_load);
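
To make the table format concrete: z8530_channel_load() walks flat (register, value) pairs, a register index above 0x0F selects the 85230 prime set through R15 bit 0, and 255 ends the list. The table below is a hypothetical sketch using the WR9 bit names from z85230.h, not a tested line configuration; real tables look like the z8530_hdlc_kilostream arrays earlier in this file.

/* Hypothetical example table, illustration only */
static u8 example_quiet_table[] = {
	9, 0,			/* WR9: no interrupts while loading */
	1, 0,			/* WR1: no Rx/Tx interrupt sources */
	9, NV | MIE | NORESET,	/* WR9: master interrupt enable, no vector */
	255			/* terminator */
};

/* Usage sketch: z8530_channel_load(&dev->chanA, example_quiet_table); */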
-
-/**
- * z8530_tx_begin - Begin packet transmission
- * @c: The Z8530 channel to kick
- *
- * This is the speed sensitive side of transmission. If we are called
- * and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
- *
- * Note: We are handling this code path in the interrupt path, keep it
- * fast or bad things will happen.
- *
- * Called with the lock held.
- */
-
-static void z8530_tx_begin(struct z8530_channel *c)
-{
- unsigned long flags;
-
- if (c->tx_skb)
- return;
-
- c->tx_skb = c->tx_next_skb;
- c->tx_next_skb = NULL;
- c->tx_ptr = c->tx_next_ptr;
-
- if (!c->tx_skb) {
- /* Idle on */
- if (c->dma_tx) {
- flags = claim_dma_lock();
- disable_dma(c->txdma);
- /* Check if we crapped out.
- */
- if (get_dma_residue(c->txdma)) {
- c->netdevice->stats.tx_dropped++;
- c->netdevice->stats.tx_fifo_errors++;
- }
- release_dma_lock(flags);
- }
- c->txcount = 0;
- } else {
- c->txcount = c->tx_skb->len;
-
- if (c->dma_tx) {
- /* FIXME. DMA is broken for the original 8530,
- * on the older parts we need to set a flag and
- * wait for a further TX interrupt to fire this
- * stage off
- */
-
- flags = claim_dma_lock();
- disable_dma(c->txdma);
-
- /* These two are needed by the 8530/85C30
- * and must be issued when idling.
- */
- if (c->dev->type != Z85230) {
- write_zsctrl(c, RES_Tx_CRC);
- write_zsctrl(c, RES_EOM_L);
- }
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- clear_dma_ff(c->txdma);
- set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
- set_dma_count(c->txdma, c->txcount);
- enable_dma(c->txdma);
- release_dma_lock(flags);
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R5, c->regs[R5] | TxENAB);
- } else {
- /* ABUNDER off */
- write_zsreg(c, R10, c->regs[10]);
- write_zsctrl(c, RES_Tx_CRC);
-
- while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
- write_zsreg(c, R8, *c->tx_ptr++);
- c->txcount--;
- }
- }
- }
- /* Since we emptied tx_skb we can ask for more
- */
- netif_wake_queue(c->netdevice);
-}
-
-/**
- * z8530_tx_done - TX complete callback
- * @c: The channel that completed a transmit.
- *
- * This is called when we complete a packet send. We wake the queue,
- * start the next packet going and then free the buffer of the existing
- * packet. This code is fairly timing sensitive.
- *
- * Called with the register lock held.
- */
-
-static void z8530_tx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
-
- /* Actually this can happen.*/
- if (!c->tx_skb)
- return;
-
- skb = c->tx_skb;
- c->tx_skb = NULL;
- z8530_tx_begin(c);
- c->netdevice->stats.tx_packets++;
- c->netdevice->stats.tx_bytes += skb->len;
- dev_consume_skb_irq(skb);
-}
-
-/**
- * z8530_null_rx - Discard a packet
- * @c: The channel the packet arrived on
- * @skb: The buffer
- *
- * We point the receive handler at this function when idle. Instead
- * of processing the frames we get to throw them away.
- */
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
-{
- dev_kfree_skb_any(skb);
-}
-EXPORT_SYMBOL(z8530_null_rx);
-
-/**
- * z8530_rx_done - Receive completion callback
- * @c: The channel that completed a receive
- *
- * A new packet is complete. Our goal here is to get back into receive
- * mode as fast as possible. On the Z85230 we could change to using
- * ESCC mode, but on the older chips we have no choice. We flip to the
- * new buffer immediately in DMA mode so that the DMA of the next
- * frame can occur while we are copying the previous buffer to an sk_buff
- *
- * Called with the lock held
- */
-static void z8530_rx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
- int ct;
-
- /* Is our receive engine in DMA mode
- */
- if (c->rxdma_on) {
- /* Save the ready state and the buffer currently
- * being used as the DMA target
- */
- int ready = c->dma_ready;
- unsigned char *rxb = c->rx_buf[c->dma_num];
- unsigned long flags;
-
- /* Complete this DMA. Necessary to find the length
- */
- flags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- c->rxdma_on = 0;
- ct = c->mtu - get_dma_residue(c->rxdma);
- if (ct < 0)
- ct = 2; /* Shit happens.. */
- c->dma_ready = 0;
-
- /* Normal case: the other slot is free, start the next DMA
- * into it immediately.
- */
-
- if (ready) {
- c->dma_num ^= 1;
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
- set_dma_count(c->rxdma, c->mtu);
- c->rxdma_on = 1;
- enable_dma(c->rxdma);
- /* Stop any frames that we missed the head of
- * from passing
- */
- write_zsreg(c, R0, RES_Rx_CRC);
- } else {
- /* Can't occur as we don't re-enable the DMA irq until
- * after the flip is done
- */
- netdev_warn(c->netdevice, "DMA flip overrun!\n");
- }
-
- release_dma_lock(flags);
-
- /* Shove the old buffer into an sk_buff. We can't DMA
- * directly into one on a PC - it might be above the 16Mb
- * boundary. Optimisation - we could check to see if we
- * can avoid the copy. Optimisation 2 - make the memcpy
- * a copychecksum.
- */
-
- skb = dev_alloc_skb(ct);
- if (!skb) {
- c->netdevice->stats.rx_dropped++;
- netdev_warn(c->netdevice, "Memory squeeze\n");
- } else {
- skb_put(skb, ct);
- skb_copy_to_linear_data(skb, rxb, ct);
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- c->dma_ready = 1;
- } else {
- RT_LOCK;
- skb = c->skb;
-
- /* The game we play for non DMA is similar. We want to
- * get the controller set up for the next packet as fast
- * as possible. We potentially only have one byte + the
- * fifo length for this. Thus we want to flip to the new
- * buffer and then mess around copying and allocating
- * things. For the current case it doesn't matter but
- * if you build a system where the sync irq isn't blocked
- * by the kernel IRQ disable then you need only block the
- * sync IRQ for the RT_LOCK area.
- *
- */
- ct = c->count;
-
- c->skb = c->skb2;
- c->count = 0;
- c->max = c->mtu;
- if (c->skb) {
- c->dptr = c->skb->data;
- c->max = c->mtu;
- } else {
- c->count = 0;
- c->max = 0;
- }
- RT_UNLOCK;
-
- c->skb2 = dev_alloc_skb(c->mtu);
- if (c->skb2)
- skb_put(c->skb2, c->mtu);
-
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- /* If we received a frame we must now process it.
- */
- if (skb) {
- skb_trim(skb, ct);
- c->rx_function(c, skb);
- } else {
- c->netdevice->stats.rx_dropped++;
- netdev_err(c->netdevice, "Lost a frame\n");
- }
-}
-
-/**
- * spans_boundary - Check a packet can be ISA DMA'd
- * @skb: The buffer to check
- *
- * Returns true if the buffer crosses a DMA boundary on a PC. The poor
- * thing can only DMA within a 64K block, not across the edges of it.
- */
-
-static inline int spans_boundary(struct sk_buff *skb)
-{
- unsigned long a = (unsigned long)skb->data;
-
- a ^= (a + skb->len);
- if (a & 0x00010000) /* If the 64K bit is different.. */
- return 1;
- return 0;
-}
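
The XOR trick deserves a spelled-out example: the start address and the one-past-end address are XORed, and if they differ in bit 16 the buffer straddles a 64K page, which the ISA DMA controller cannot cross in one transfer. A standalone sketch in ordinary userspace C (not kernel code) of the same test:

#include <stdio.h>

/* Same test as spans_boundary(), on plain addresses */
static int crosses_64k(unsigned long start, unsigned long len)
{
	unsigned long a = start;

	a ^= (a + len);			/* differing bits of start and end */
	return (a & 0x00010000UL) != 0;	/* bit 16 differs => 64K crossing */
}

int main(void)
{
	/* 0x1FF00 + 0x200 ends at 0x20100, in the next 64K page */
	printf("crosses: %d\n", crosses_64k(0x1FF00UL, 0x200UL));
	/* 0x10000 + 0x200 stays inside a single 64K page */
	printf("crosses: %d\n", crosses_64k(0x10000UL, 0x200UL));
	return 0;
}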
-
-/**
- * z8530_queue_xmit - Queue a packet
- * @c: The channel to use
- * @skb: The packet to kick down the channel
- *
- * Queue a packet for transmission. Because the per-packet
- * interrupt latencies for the Z85230 are rather hard to hit
- * even in DMA mode, we do the flip to the DMA buffer here if
- * needed, not in the IRQ.
- *
- * Called from the network code. The lock is not held at this
- * point.
- */
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
-{
- unsigned long flags;
-
- netif_stop_queue(c->netdevice);
- if (c->tx_next_skb)
- return NETDEV_TX_BUSY;
-
- /* PC SPECIFIC - DMA limits */
- /* If we will DMA the transmit and it's gone over the ISA bus
- * limit, then copy to the flip buffer
- */
-
- if (c->dma_tx &&
- ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
- 16 * 1024 * 1024 || spans_boundary(skb))) {
- /* Send the flip buffer, and flip the flippy bit.
- * We don't care which one is used when, just so long as
- * we never use the same buffer twice in a row. Since
- * only one buffer can be going out at a time the other
- * has to be safe.
- */
- c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
- c->tx_dma_used ^= 1; /* Flip temp buffer */
- skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
- } else {
- c->tx_next_ptr = skb->data;
- }
- RT_LOCK;
- c->tx_next_skb = skb;
- RT_UNLOCK;
-
- spin_lock_irqsave(c->lock, flags);
- z8530_tx_begin(c);
- spin_unlock_irqrestore(c->lock, flags);
-
- return NETDEV_TX_OK;
-}
-EXPORT_SYMBOL(z8530_queue_xmit);
-
-/* Module support
- */
-static const char banner[] __initconst =
- KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
-
-static int __init z85230_init_driver(void)
-{
- printk(banner);
- return 0;
-}
-module_init(z85230_init_driver);
-
-static void __exit z85230_cleanup_driver(void)
-{
-}
-module_exit(z85230_cleanup_driver);
-
-MODULE_AUTHOR("Red Hat Inc.");
-MODULE_DESCRIPTION("Z85x30 synchronous driver core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
deleted file mode 100644
index 462cb620bc5d..000000000000
--- a/drivers/net/wan/z85230.h
+++ /dev/null
@@ -1,407 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Description of Z8530 Z85C30 and Z85230 communications chips
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- */
-
-#ifndef _Z8530_H
-#define _Z8530_H
-
-#include <linux/tty.h>
-#include <linux/interrupt.h>
-
-/* Conversion routines between BRG time constants and bits
- * per second.
- */
-#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
-#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
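
These two macros convert between the baud rate generator time constant and the bit rate: bps = freq / (2 * (tc + 2)), with BPS_TO_BRG biased by +bps so the result rounds to the nearest constant rather than truncating. A standalone check follows; the macros are repeated locally so the snippet compiles on its own, and the 4.9152 MHz PCLK is just a typical SCC clock, not something this header prescribes.

#include <stdio.h>

#define EX_BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
#define EX_BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)

int main(void)
{
	long freq = 4915200;			/* typical 4.9152 MHz PCLK */
	long brg = EX_BPS_TO_BRG(9600, freq);	/* evaluates to 254 */

	/* converting back recovers 9600 bps exactly */
	printf("brg = %ld, bps = %ld\n", brg, EX_BRG_TO_BPS(brg, freq));
	return 0;
}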
-
-/* The Zilog register set */
-
-#define FLAG 0x7e
-
-/* Write Register 0 */
-#define R0 0 /* Register selects */
-#define R1 1
-#define R2 2
-#define R3 3
-#define R4 4
-#define R5 5
-#define R6 6
-#define R7 7
-#define R8 8
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-
-#define RPRIME 16 /* Indicate a prime register access on 230 */
-
-#define NULLCODE 0 /* Null Code */
-#define POINT_HIGH 0x8 /* Select upper half of registers */
-#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
-#define SEND_ABORT 0x18 /* HDLC Abort */
-#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
-#define RES_Tx_P 0x28 /* Reset TxINT Pending */
-#define ERR_RES 0x30 /* Error Reset */
-#define RES_H_IUS 0x38 /* Reset highest IUS */
-
-#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
-#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
-#define RES_EOM_L 0xC0 /* Reset EOM latch */
-
-/* Write Register 1 */
-
-#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
-#define TxINT_ENAB 0x2 /* Tx Int Enable */
-#define PAR_SPEC 0x4 /* Parity is special condition */
-
-#define RxINT_DISAB 0 /* Rx Int Disable */
-#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
-#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
-#define INT_ERR_Rx 0x18 /* Int on error only */
-
-#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
-#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
-#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
-
-/* Write Register #2 (Interrupt Vector) */
-
-/* Write Register 3 */
-
-#define RxENABLE 0x1 /* Rx Enable */
-#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
-#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
-#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
-#define ENT_HM 0x10 /* Enter Hunt Mode */
-#define AUTO_ENAB 0x20 /* Auto Enables */
-#define Rx5 0x0 /* Rx 5 Bits/Character */
-#define Rx7 0x40 /* Rx 7 Bits/Character */
-#define Rx6 0x80 /* Rx 6 Bits/Character */
-#define Rx8 0xc0 /* Rx 8 Bits/Character */
-
-/* Write Register 4 */
-
-#define PAR_ENA 0x1 /* Parity Enable */
-#define PAR_EVEN 0x2 /* Parity Even/Odd* */
-
-#define SYNC_ENAB 0 /* Sync Modes Enable */
-#define SB1 0x4 /* 1 stop bit/char */
-#define SB15 0x8 /* 1.5 stop bits/char */
-#define SB2 0xc /* 2 stop bits/char */
-
-#define MONSYNC 0 /* 8 Bit Sync character */
-#define BISYNC 0x10 /* 16 bit sync character */
-#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
-#define EXTSYNC 0x30 /* External Sync Mode */
-
-#define X1CLK 0x0 /* x1 clock mode */
-#define X16CLK 0x40 /* x16 clock mode */
-#define X32CLK 0x80 /* x32 clock mode */
-#define X64CLK 0xC0 /* x64 clock mode */
-
-/* Write Register 5 */
-
-#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
-#define RTS 0x2 /* RTS */
-#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
-#define TxENAB 0x8 /* Tx Enable */
-#define SND_BRK 0x10 /* Send Break */
-#define Tx5 0x0 /* Tx 5 bits (or less)/character */
-#define Tx7 0x20 /* Tx 7 bits/character */
-#define Tx6 0x40 /* Tx 6 bits/character */
-#define Tx8 0x60 /* Tx 8 bits/character */
-#define DTR 0x80 /* DTR */
-
-/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
-
-/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
-
-/* Write Register 8 (transmit buffer) */
-
-/* Write Register 9 (Master interrupt control) */
-#define VIS 1 /* Vector Includes Status */
-#define NV 2 /* No Vector */
-#define DLC 4 /* Disable Lower Chain */
-#define MIE 8 /* Master Interrupt Enable */
-#define STATHI 0x10 /* Status high */
-#define NORESET 0 /* No reset on write to R9 */
-#define CHRB 0x40 /* Reset channel B */
-#define CHRA 0x80 /* Reset channel A */
-#define FHWRES 0xc0 /* Force hardware reset */
-
-/* Write Register 10 (misc control bits) */
-#define BIT6 1 /* 6 bit/8bit sync */
-#define LOOPMODE 2 /* SDLC Loop mode */
-#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
-#define MARKIDLE 8 /* Mark/flag on idle */
-#define GAOP 0x10 /* Go active on poll */
-#define NRZ 0 /* NRZ mode */
-#define NRZI 0x20 /* NRZI mode */
-#define FM1 0x40 /* FM1 (transition = 1) */
-#define FM0 0x60 /* FM0 (transition = 0) */
-#define CRCPS 0x80 /* CRC Preset I/O */
-
-/* Write Register 11 (Clock Mode control) */
-#define TRxCXT 0 /* TRxC = Xtal output */
-#define TRxCTC 1 /* TRxC = Transmit clock */
-#define TRxCBR 2 /* TRxC = BR Generator Output */
-#define TRxCDP 3 /* TRxC = DPLL output */
-#define TRxCOI 4 /* TRxC O/I */
-#define TCRTxCP 0 /* Transmit clock = RTxC pin */
-#define TCTRxCP 8 /* Transmit clock = TRxC pin */
-#define TCBR 0x10 /* Transmit clock = BR Generator output */
-#define TCDPLL 0x18 /* Transmit clock = DPLL output */
-#define RCRTxCP 0 /* Receive clock = RTxC pin */
-#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
-#define RCBR 0x40 /* Receive clock = BR Generator output */
-#define RCDPLL 0x60 /* Receive clock = DPLL output */
-#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
-
-/* Write Register 12 (lower byte of baud rate generator time constant) */
-
-/* Write Register 13 (upper byte of baud rate generator time constant) */
-
-/* Write Register 14 (Misc control bits) */
-#define BRENABL 1 /* Baud rate generator enable */
-#define BRSRC 2 /* Baud rate generator source */
-#define DTRREQ 4 /* DTR/Request function */
-#define AUTOECHO 8 /* Auto Echo */
-#define LOOPBAK 0x10 /* Local loopback */
-#define SEARCH 0x20 /* Enter search mode */
-#define RMC 0x40 /* Reset missing clock */
-#define DISDPLL 0x60 /* Disable DPLL */
-#define SSBR 0x80 /* Set DPLL source = BR generator */
-#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
-#define SFMM 0xc0 /* Set FM mode */
-#define SNRZI 0xe0 /* Set NRZI mode */
-
-/* Write Register 15 (external/status interrupt control) */
-#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */
-#define ZCIE 2 /* Zero count IE */
-#define FIFOE 4 /* Z85230 only */
-#define DCDIE 8 /* DCD IE */
-#define SYNCIE 0x10 /* Sync/hunt IE */
-#define CTSIE 0x20 /* CTS IE */
-#define TxUIE 0x40 /* Tx Underrun/EOM IE */
-#define BRKIE 0x80 /* Break/Abort IE */
-
-
-/* Read Register 0 */
-#define Rx_CH_AV 0x1 /* Rx Character Available */
-#define ZCOUNT 0x2 /* Zero count */
-#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
-#define DCD 0x8 /* DCD */
-#define SYNC_HUNT 0x10 /* Sync/hunt */
-#define CTS 0x20 /* CTS */
-#define TxEOM 0x40 /* Tx underrun */
-#define BRK_ABRT 0x80 /* Break/Abort */
-
-/* Read Register 1 */
-#define ALL_SNT 0x1 /* All sent */
-/* Residue Data for 8 Rx bits/char programmed */
-#define RES3 0x8 /* 0/3 */
-#define RES4 0x4 /* 0/4 */
-#define RES5 0xc /* 0/5 */
-#define RES6 0x2 /* 0/6 */
-#define RES7 0xa /* 0/7 */
-#define RES8 0x6 /* 0/8 */
-#define RES18 0xe /* 1/8 */
-#define RES28 0x0 /* 2/8 */
-/* Special Rx Condition Interrupts */
-#define PAR_ERR 0x10 /* Parity error */
-#define Rx_OVR 0x20 /* Rx Overrun Error */
-#define CRC_ERR 0x40 /* CRC/Framing Error */
-#define END_FR 0x80 /* End of Frame (SDLC) */
-
-/* Read Register 2 (channel b only) - Interrupt vector */
-
-/* Read Register 3 (interrupt pending register) ch a only */
-#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
-#define CHBTxIP 0x2 /* Channel B Tx IP */
-#define CHBRxIP 0x4 /* Channel B Rx IP */
-#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
-#define CHATxIP 0x10 /* Channel A Tx IP */
-#define CHARxIP 0x20 /* Channel A Rx IP */
-
-/* Read Register 8 (receive data register) */
-
-/* Read Register 10 (misc status bits) */
-#define ONLOOP 2 /* On loop */
-#define LOOPSEND 0x10 /* Loop sending */
-#define CLK2MIS 0x40 /* Two clocks missing */
-#define CLK1MIS 0x80 /* One clock missing */
-
-/* Read Register 12 (lower byte of baud rate generator constant) */
-
-/* Read Register 13 (upper byte of baud rate generator constant) */
-
-/* Read Register 15 (value of WR 15) */
-
-
-/*
- * Interrupt handling functions for this SCC
- */
-
-struct z8530_channel;
-
-struct z8530_irqhandler
-{
- void (*rx)(struct z8530_channel *);
- void (*tx)(struct z8530_channel *);
- void (*status)(struct z8530_channel *);
-};
-
-/*
- * A channel of the Z8530
- */
-
-struct z8530_channel
-{
- struct z8530_irqhandler *irqs; /* IRQ handlers */
- /*
- * Synchronous
- */
- u16 count; /* Bytes received */
- u16 max; /* Most we can receive this frame */
- u16 mtu; /* MTU of the device */
- u8 *dptr; /* Pointer into rx buffer */
- struct sk_buff *skb; /* Buffer dptr points into */
- struct sk_buff *skb2; /* Pending buffer */
- u8 status; /* Current DCD */
- u8 dcdcheck; /* which bit to check for line */
- u8 sync; /* Set if in sync mode */
-
- u8 regs[32]; /* Register map for the chip */
- u8 pendregs[32]; /* Pending register values */
-
- struct sk_buff *tx_skb; /* Buffer being transmitted */
- struct sk_buff *tx_next_skb; /* Next transmit buffer */
- u8 *tx_ptr; /* Byte pointer into the buffer */
- u8 *tx_next_ptr; /* Next pointer to use */
- u8 *tx_dma_buf[2]; /* TX flip buffers for DMA */
- u8 tx_dma_used; /* Flip buffer usage toggler */
- u16 txcount; /* Count of bytes to transmit */
-
- void (*rx_function)(struct z8530_channel *, struct sk_buff *);
-
- /*
- * Sync DMA
- */
-
- u8 rxdma; /* DMA channels */
- u8 txdma;
- u8 rxdma_on; /* DMA active if flag set */
- u8 txdma_on;
- u8 dma_num; /* Buffer we are DMAing into */
- u8 dma_ready; /* Is the other buffer free */
- u8 dma_tx; /* TX is to use DMA */
- u8 *rx_buf[2]; /* The flip buffers */
-
- /*
- * System
- */
-
- struct z8530_dev *dev; /* Z85230 chip instance we are from */
- unsigned long ctrlio; /* I/O ports */
- unsigned long dataio;
-
- /*
- * For PC we encode this way.
- */
-#define Z8530_PORT_SLEEP 0x80000000
-#define Z8530_PORT_OF(x) ((x)&0xFFFF)
-
- u32 rx_overrun; /* Overruns - not done yet */
- u32 rx_crc_err;
-
- /*
- * Bound device pointers
- */
-
- void *private; /* For our owner */
- struct net_device *netdevice; /* Network layer device */
-
- spinlock_t *lock; /* Device lock */
-};
-
-/*
- * Each Z853x0 device.
- */
-
-struct z8530_dev
-{
- char *name; /* Device instance name */
- struct z8530_channel chanA; /* SCC channel A */
- struct z8530_channel chanB; /* SCC channel B */
- int type;
-#define Z8530 0 /* NMOS dinosaur */
-#define Z85C30 1 /* CMOS - better */
-#define Z85230 2 /* CMOS with real FIFO */
- int irq; /* Interrupt for the device */
- int active; /* Soft interrupt enable - the Mac doesn't
- always have a hard disable on its 8530s... */
- spinlock_t lock;
-};
-
-
-/*
- * Functions
- */
-
-extern u8 z8530_dead_port[];
-extern u8 z8530_hdlc_kilostream_85230[];
-extern u8 z8530_hdlc_kilostream[];
-irqreturn_t z8530_interrupt(int, void *);
-void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-int z8530_init(struct z8530_dev *);
-int z8530_shutdown(struct z8530_dev *);
-int z8530_sync_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-int z8530_channel_load(struct z8530_channel *, u8 *);
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
-
-
-/*
- * Standard interrupt vector sets
- */
-
-extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
-
-/*
- * Asynchronous Interfacing
- */
-
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-
-#define SERIAL_XMIT_SIZE 4096
-#define WAKEUP_CHARS 256
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define RS_EVENT_WRITE_WAKEUP 0
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */
-#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
-#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
-#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
-#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */
-#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-
-#endif /* !(_Z8530_H) */
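
For reference, the interrupt model the removed header describes is a per-channel callback vector: struct z8530_irqhandler supplies rx, tx and status hooks, and struct z8530_channel carries the buffers, DMA state and register shadow those hooks operate on. The fragment below is only an illustrative sketch of how such a vector is typically dispatched against the RR3 pending bits defined above; it is not the deleted z85230 code, and the pending byte is assumed to be read by the caller.

    /* Illustrative dispatch through the callback vector; not the removed
     * driver's actual ISR. 'pending' is assumed to hold the RR3
     * interrupt-pending bits (CHARxIP/CHATxIP/CHAEXT) defined above.
     */
    static void example_channel_dispatch(struct z8530_channel *chan, u8 pending)
    {
            if ((pending & CHARxIP) && chan->irqs->rx)
                    chan->irqs->rx(chan);           /* receive data ready */
            if ((pending & CHATxIP) && chan->irqs->tx)
                    chan->irqs->tx(chan);           /* transmit buffer empty */
            if ((pending & CHAEXT) && chan->irqs->status)
                    chan->irqs->status(chan);       /* DCD/CTS/break change */
    }
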
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 7add2002ff4c..cb1c15012dd0 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -28,9 +28,11 @@ source "drivers/net/wireless/intersil/Kconfig"
source "drivers/net/wireless/marvell/Kconfig"
source "drivers/net/wireless/mediatek/Kconfig"
source "drivers/net/wireless/microchip/Kconfig"
+source "drivers/net/wireless/purelifi/Kconfig"
source "drivers/net/wireless/ralink/Kconfig"
source "drivers/net/wireless/realtek/Kconfig"
source "drivers/net/wireless/rsi/Kconfig"
+source "drivers/net/wireless/silabs/Kconfig"
source "drivers/net/wireless/st/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zydas/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 80b324499786..abf3e5c87ca7 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -13,9 +13,11 @@ obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/
obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_WLAN_VENDOR_PURELIFI) += purelifi/
obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/
obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/
+obj-$(CONFIG_WLAN_VENDOR_SILABS) += silabs/
obj-$(CONFIG_WLAN_VENDOR_ST) += st/
obj-$(CONFIG_WLAN_VENDOR_TI) += ti/
obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 9cabd342d156..9f84a6fde0c2 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1160,7 +1160,7 @@ static int ar5523_get_wlan_mode(struct ar5523 *ar,
ar5523_info(ar, "STA not found!\n");
return WLAN_MODE_11b;
}
- sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band];
for (bit = 0; bit < band->n_bitrates; bit++) {
if (sta_rate_set & 1) {
@@ -1198,7 +1198,7 @@ static void ar5523_create_rateset(struct ar5523 *ar,
ar5523_info(ar, "STA not found. Cannot set rates\n");
sta_rate_set = bss_conf->basic_rates;
} else
- sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band];
ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
@@ -1500,7 +1500,7 @@ static int ar5523_load_firmware(struct usb_device *dev)
return -ENOENT;
}
- txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+ txblock = kzalloc(sizeof(*txblock), GFP_KERNEL);
if (!txblock)
goto out;
@@ -1512,7 +1512,6 @@ static int ar5523_load_firmware(struct usb_device *dev)
if (!fwbuf)
goto out_free_rxblock;
- memset(txblock, 0, sizeof(struct ar5523_fwblock));
txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
txblock->total = cpu_to_be32(fw->size);
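
The ar5523 change above folds a kmalloc() followed by memset() into a single kzalloc(), which returns memory that is already zeroed. A minimal sketch of the two equivalent patterns, using a placeholder structure rather than the real ar5523_fwblock:

    /* Sketch only: struct fw_blob is a placeholder type, not an ar5523 one. */
    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct fw_blob {
            u32 flags;
            u32 total;
    };

    static struct fw_blob *alloc_blob_old(void)
    {
            struct fw_blob *blob = kmalloc(sizeof(*blob), GFP_KERNEL);

            if (blob)
                    memset(blob, 0, sizeof(*blob)); /* explicit zeroing step */
            return blob;
    }

    static struct fw_blob *alloc_blob_new(void)
    {
            return kzalloc(sizeof(struct fw_blob), GFP_KERNEL); /* zeroed on allocation */
    }
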
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index ab8f77ae5e66..f0c615fa5614 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -728,20 +728,17 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
struct ath10k *ar;
struct ath10k_ahb *ar_ahb;
struct ath10k_pci *ar_pci;
- const struct of_device_id *of_id;
enum ath10k_hw_rev hw_rev;
size_t size;
int ret;
struct ath10k_bus_params bus_params = {};
- of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
- if (!of_id) {
- dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ hw_rev = (enum ath10k_hw_rev)of_device_get_match_data(&pdev->dev);
+ if (!hw_rev) {
+ dev_err(&pdev->dev, "OF data missing\n");
return -EINVAL;
}
- hw_rev = (enum ath10k_hw_rev)of_id->data;
-
size = sizeof(*ar_pci) + sizeof(*ar_ahb);
ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
hw_rev, &ath10k_ahb_hif_ops);
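
The ahb.c hunk above replaces the open-coded of_match_device() lookup with of_device_get_match_data(), which returns the .data pointer of the matched OF table entry directly, or NULL when nothing matched. A hedged sketch of the pattern with a made-up match table; note, as the hunk's own NULL check shows, that a .data value which casts to zero cannot be told apart from a failed match, so values used this way should be non-zero.

    /* Sketch of the of_device_get_match_data() pattern; the table, enum and
     * probe function are placeholders, not ath10k symbols.
     */
    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    enum example_hw_rev {
            EXAMPLE_HW_V1 = 1,      /* keep non-zero: 0 would look like "no match" */
            EXAMPLE_HW_V2,
    };

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-v1", .data = (void *)(unsigned long)EXAMPLE_HW_V1 },
            { .compatible = "vendor,example-v2", .data = (void *)(unsigned long)EXAMPLE_HW_V2 },
            { }
    };

    static int example_probe(struct platform_device *pdev)
    {
            enum example_hw_rev hw_rev;

            /* Returns the .data of the matching entry, or NULL if none matched. */
            hw_rev = (enum example_hw_rev)(unsigned long)of_device_get_match_data(&pdev->dev);
            if (!hw_rev) {
                    dev_err(&pdev->dev, "OF match data missing\n");
                    return -EINVAL;
            }

            dev_info(&pdev->dev, "hw revision %d\n", hw_rev);
            return 0;
    }
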
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 9e1f483e1362..2092bfd02cd1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -94,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -131,6 +132,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -169,6 +171,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -202,6 +205,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.bmi_large_size_download = true,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -239,6 +243,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -276,6 +281,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -313,6 +319,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -354,6 +361,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -397,6 +405,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -447,6 +456,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -531,6 +542,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -570,6 +582,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -600,6 +613,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin_workaround = true,
.credit_size_workaround = true,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -644,6 +658,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -674,6 +689,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = true,
+ .hw_restart_disconnect = true,
},
};
@@ -2442,6 +2458,7 @@ EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ struct ath10k_vif *arvif;
int ret;
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
@@ -2480,6 +2497,14 @@ static void ath10k_core_restart(struct work_struct *work)
ar->state = ATH10K_STATE_RESTARTING;
ath10k_halt(ar);
ath10k_scan_finish(ar);
+ if (ar->hw_params.hw_restart_disconnect) {
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ieee80211_hw_restart_disconnect(arvif->vif);
+ }
+ }
+
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 9f6680b3be0a..8bfabbcfdb14 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -59,9 +59,6 @@
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
-/* NAPI poll budget */
-#define ATH10K_NAPI_BUDGET 64
-
/* SMBIOS type containing Board Data File Name Extension */
#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 5215a6816d71..93acf0dd580a 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -633,6 +633,8 @@ struct ath10k_hw_params {
bool supports_peer_stats_info;
bool dynamic_sar_support;
+
+ bool hw_restart_disconnect;
};
struct htt_resp;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b11aaee8b8c0..06a51a48c1d9 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2251,7 +2251,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
band = def.chan->band;
sband = ar->hw->wiphy->bands[band];
- ratemask = sta->supp_rates[band];
+ ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -2296,7 +2296,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
@@ -2335,7 +2335,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->peer_flags |= ar->wmi.peer_flags->ldbc;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->peer_flags |= ar->wmi.peer_flags->bw40;
arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
}
@@ -2388,7 +2388,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
+ arg->peer_num_spatial_streams = min(sta->deflink.rx_nss,
+ max_nss);
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -2545,7 +2546,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_hw_params *hw = &ar->hw_params;
struct cfg80211_chan_def def;
@@ -2587,10 +2588,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->peer_flags |= ar->wmi.peer_flags->bw80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->peer_flags |= ar->wmi.peer_flags->bw160;
/* Calculate peer NSS capability from VHT capabilities if STA
@@ -2604,7 +2605,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
+ arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss);
arg->peer_vht_rates.rx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->peer_vht_rates.rx_mcs_set =
@@ -2684,15 +2685,15 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
struct ieee80211_sta *sta)
{
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
@@ -2703,13 +2704,13 @@ static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
}
}
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
@@ -2736,15 +2737,15 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->vht_cap.vht_supported &&
+ if (sta->deflink.vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
@@ -2759,12 +2760,12 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
/*
* Check VHT first.
*/
- if (sta->vht_cap.vht_supported &&
+ if (sta->deflink.vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
phymode = ath10k_mac_get_phymode_vht(ar, sta);
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -3079,8 +3080,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
/* ap_sta must be accessed only within rcu section which must be left
* before calling ath10k_setup_peer_smps() which might sleep.
*/
- ht_cap = ap_sta->ht_cap;
- vht_cap = ap_sta->vht_cap;
+ ht_cap = ap_sta->deflink.ht_cap;
+ vht_cap = ap_sta->deflink.vht_cap;
ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
if (ret) {
@@ -3278,7 +3279,7 @@ static int ath10k_station_assoc(struct ath10k *ar,
*/
if (!reassoc) {
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->ht_cap);
+ &sta->deflink.ht_cap);
if (ret) {
ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -4118,11 +4119,10 @@ void ath10k_offchan_tx_work(struct work_struct *work)
peer = ath10k_peer_find(ar, vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
- if (peer)
+ if (peer) {
ath10k_warn(ar, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
-
- if (!peer) {
+ } else {
ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
peer_addr,
WMI_PEER_TYPE_DEFAULT);
@@ -5339,13 +5339,29 @@ err:
static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ u32 opt;
ath10k_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_OFF) {
- if (!ar->hw_rfkill_on)
- ath10k_halt(ar);
+ if (!ar->hw_rfkill_on) {
+ /* If the current driver state is RESTARTING but not yet
+ * fully RESTARTED because of an incoming suspend event,
+ * then ath10k_halt() is already called via
+ * ath10k_core_restart() and should not be called here.
+ */
+ if (ar->state != ATH10K_STATE_RESTARTING) {
+ ath10k_halt(ar);
+ } else {
+ /* Suspending here, because when in RESTARTING
+ * state, ath10k_core_stop() skips
+ * ath10k_wait_for_suspend().
+ */
+ opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
+ ath10k_wait_for_suspend(ar, opt);
+ }
+ }
ar->state = ATH10K_STATE_OFF;
}
mutex_unlock(&ar->conf_mutex);
@@ -6787,10 +6803,10 @@ static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
int ret = 0;
s16 txpwr;
- if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
} else {
- txpwr = sta->txpwr.power;
+ txpwr = sta->deflink.txpwr.power;
if (!txpwr)
return -EINVAL;
}
@@ -6910,26 +6926,26 @@ static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
struct ieee80211_sta *sta,
u32 rate_ctrl_flag, u8 nss)
{
- if (nss > sta->rx_nss) {
+ if (nss > sta->deflink.rx_nss) {
ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n",
- nss, sta->rx_nss);
+ nss, sta->deflink.rx_nss);
return -EINVAL;
}
if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
- if (!sta->vht_cap.vht_supported) {
+ if (!sta->deflink.vht_cap.vht_supported) {
ath10k_warn(ar, "Invalid VHT rate for sta %pM\n",
sta->addr);
return -EINVAL;
}
} else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
- if (!sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ if (!sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported) {
ath10k_warn(ar, "Invalid HT rate for sta %pM\n",
sta->addr);
return -EINVAL;
}
} else {
- if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported)
+ if (sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported)
return -EINVAL;
}
@@ -8272,7 +8288,7 @@ static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
u8 rate = arvif->vht_pfr;
/* skip non vht and multiple rate peers */
- if (!sta->vht_cap.vht_supported || arvif->vht_num_rates != 1)
+ if (!sta->deflink.vht_cap.vht_supported || arvif->vht_num_rates != 1)
return false;
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@@ -8313,7 +8329,7 @@ static void ath10k_mac_clr_bitrate_mask_iter(void *data,
int err;
/* clear vht peers only */
- if (arsta->arvif != arvif || !sta->vht_cap.vht_supported)
+ if (arsta->arvif != arvif || !sta->deflink.vht_cap.vht_supported)
return;
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@@ -8457,13 +8473,14 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_STA,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->addr, changed, sta->deflink.bandwidth,
+ sta->deflink.rx_nss,
sta->smps_mode);
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = WMI_PEER_CHWIDTH_20MHZ;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_20:
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
@@ -8478,7 +8495,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
break;
default:
ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
- sta->bandwidth, sta->addr);
+ sta->deflink.bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
@@ -8487,7 +8504,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->rx_nss;
+ arsta->nss = sta->deflink.rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
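
The long run of mac.c changes above is a mechanical migration: per-station capability and rate fields (supp_rates, ht_cap, vht_cap, bandwidth, rx_nss, txpwr) move from struct ieee80211_sta itself to its default-link container sta->deflink, part of mac80211's multi-link (MLO) preparation. A before/after sketch of the access pattern; the helper is illustrative, the field names are mac80211's:

    /* Illustrative only: shows the field relocation, not an ath10k function. */
    #include <net/mac80211.h>

    static bool example_sta_is_vht80(struct ieee80211_sta *sta)
    {
            /* before: sta->vht_cap.vht_supported && sta->bandwidth == ...    */
            /* after:  the same data is read through the default link member */
            return sta->deflink.vht_cap.vht_supported &&
                   sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80;
    }
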
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 4d4e2f91e15c..bf1c938be7d0 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3216,7 +3216,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
- ATH10K_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
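
Here and in the sdio.c and snoc.c hunks below, the driver-private ATH10K_NAPI_BUDGET (64) is dropped in favour of the core's NAPI_POLL_WEIGHT when registering the NAPI context. A minimal sketch of the registration as used at this point in the tree, where netif_napi_add() still takes an explicit weight argument; the poll callback name is a placeholder declared elsewhere:

    /* Sketch of NAPI registration with the generic weight. */
    #include <linux/netdevice.h>

    static int example_poll(struct napi_struct *napi, int budget); /* assumed to exist */

    static void example_init_napi(struct net_device *dev, struct napi_struct *napi)
    {
            /* NAPI_POLL_WEIGHT replaces the driver-private 64-packet budget */
            netif_napi_add(dev, napi, example_poll, NAPI_POLL_WEIGHT);
    }
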
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 73693c66cef1..24283c02a5ef 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -2532,7 +2532,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
}
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
- ATH10K_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 8328966a0471..607e8164bf98 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1243,7 +1243,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
- ATH10K_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
}
static int ath10k_snoc_request_irq(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 3d98f19c6ec8..ad6471b21796 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -345,6 +345,12 @@ static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb)
ep->ep_ops.ep_rx_complete(ar, skb);
/* The RX complete handler now owns the skb... */
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
+ napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
+
return;
out_free_skb:
@@ -387,6 +393,7 @@ static int ath10k_usb_hif_start(struct ath10k *ar)
int i;
struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ ath10k_core_napi_enable(ar);
ath10k_usb_start_recv_pipes(ar);
/* set the TX resource avail threshold for each TX pipe */
@@ -462,6 +469,7 @@ err:
static void ath10k_usb_hif_stop(struct ath10k *ar)
{
ath10k_usb_flush_all(ar);
+ ath10k_core_napi_sync_disable(ar);
}
static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id)
@@ -966,6 +974,20 @@ err:
return ret;
}
+static int ath10k_usb_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+ ath10k_dbg(ar, ATH10K_DBG_USB, "napi poll: done: %d, budget:%d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
+
/* ath10k usb driver registered functions */
static int ath10k_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
@@ -992,6 +1014,9 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll,
+ NAPI_POLL_WEIGHT);
+
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
product_id = le16_to_cpu(dev->descriptor.idProduct);
@@ -1013,6 +1038,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with USB */
bus_params.chip_id = 0;
+ bus_params.hl_msdu_ids = true;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret);
@@ -1044,6 +1070,7 @@ static void ath10k_usb_remove(struct usb_interface *interface)
return;
ath10k_core_unregister(ar_usb->ar);
+ netif_napi_del(&ar_usb->ar->napi);
ath10k_usb_destroy(ar_usb->ar);
usb_put_dev(interface_to_usbdev(interface));
ath10k_core_destroy(ar_usb->ar);
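
The usb.c hunk above adds a NAPI context for high-latency RX indications: the completion path schedules it with napi_schedule() inside a local_bh_disable()/local_bh_enable() pair, and the poll callback reports completion with napi_complete_done() when it finishes under budget. A generic sketch of that pairing; example_process_rx() is a hypothetical helper standing in for the driver's RX indication routine:

    /* Generic NAPI poll/schedule sketch; example_process_rx() is hypothetical. */
    #include <linux/netdevice.h>

    static int example_process_rx(int budget);  /* returns packets handled */

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            int done = example_process_rx(budget);

            /* Stop polling only when the work fits inside the budget. */
            if (done < budget)
                    napi_complete_done(napi, done);

            return done;
    }

    static void example_rx_complete(struct napi_struct *napi)
    {
            /* napi_schedule() expects softirqs (or interrupts) to be disabled;
             * wrap it when the caller is not already in such a context.
             */
            local_bh_disable();
            napi_schedule(napi);
            local_bh_enable();
    }
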
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index c1fce4159f1f..cc47e0114595 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -17,13 +17,14 @@ ath11k-y += core.o \
peer.o \
dbring.o \
hw.o \
- wow.o
+ pcic.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
+ath11k-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index f407d4af2074..050bda828966 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -12,6 +13,7 @@
#include "debug.h"
#include "hif.h"
#include <linux/remoteproc.h>
+#include "pcic.h"
static const struct of_device_id ath11k_ahb_of_match[] = {
/* TODO: Should we change the compatible string to something similar
@@ -23,18 +25,14 @@ static const struct of_device_id ath11k_ahb_of_match[] = {
{ .compatible = "qcom,ipq6018-wifi",
.data = (void *)ATH11K_HW_IPQ6018_HW10,
},
+ { .compatible = "qcom,wcn6750-wifi",
+ .data = (void *)ATH11K_HW_WCN6750_HW10,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
-static const struct ath11k_bus_params ath11k_ahb_bus_params = {
- .mhi_support = false,
- .m3_fw_support = false,
- .fixed_bdf_addr = true,
- .fixed_mem_region = true,
-};
-
#define ATH11K_IRQ_CE0_OFFSET 4
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
@@ -134,6 +132,16 @@ enum ext_irq_num {
tcl2host_status_ring,
};
+static int
+ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
+{
+ return ab->pci.msi.irqs[vector];
+}
+
+static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
+ .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
+};
+
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
return ioread32(ab->mem + offset);
@@ -401,6 +409,9 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab)
int irq_idx;
int i;
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_free_irq(ab);
+
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@@ -555,6 +566,9 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
int irq, irq_idx, i;
int ret;
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_config_irq(ab);
+
/* Configure CE irqs */
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
@@ -624,7 +638,7 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id
return 0;
}
-static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
.start = ath11k_ahb_start,
.stop = ath11k_ahb_stop,
.read32 = ath11k_ahb_read32,
@@ -636,6 +650,20 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
.power_up = ath11k_ahb_power_up,
};
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
+ .start = ath11k_pcic_start,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
+ .power_down = ath11k_ahb_power_down,
+ .power_up = ath11k_ahb_power_up,
+};
+
static int ath11k_core_get_rproc(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -658,12 +686,84 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ phys_addr_t msi_addr_pa;
+ dma_addr_t msi_addr_iova;
+ struct resource *res;
+ int int_prop;
+ int ret;
+ int i;
+
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ath11k_err(ab, "failed to fetch msi_addr\n");
+ return -ENOENT;
+ }
+
+ msi_addr_pa = res->start;
+ msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(ab->dev, msi_addr_iova))
+ return -ENOMEM;
+
+ ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
+ ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
+
+ ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
+ if (ret)
+ return ret;
+
+ ab->pci.msi.ep_base_data = int_prop + 32;
+
+ for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!res)
+ return -ENODEV;
+
+ ab->pci.msi.irqs[i] = res->start;
+ }
+
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+
+ return 0;
+}
+
+static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ void __iomem *mem;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_ahb_setup_msi_resources(ab);
+
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "ioremap error\n");
+ return PTR_ERR(mem);
+ }
+
+ ab->mem = mem;
+ ab->mem_len = resource_size(mem_res);
+
+ return 0;
+}
+
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
const struct of_device_id *of_id;
- struct resource *mem_res;
- void __iomem *mem;
+ const struct ath11k_hif_ops *hif_ops;
+ const struct ath11k_pci_ops *pci_ops;
+ enum ath11k_hw_rev hw_rev;
int ret;
of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
@@ -672,10 +772,21 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
- if (IS_ERR(mem)) {
- dev_err(&pdev->dev, "ioremap error\n");
- return PTR_ERR(mem);
+ hw_rev = (enum ath11k_hw_rev)of_id->data;
+
+ switch (hw_rev) {
+ case ATH11K_HW_IPQ8074:
+ case ATH11K_HW_IPQ6018_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_ipq8074;
+ pci_ops = NULL;
+ break;
+ case ATH11K_HW_WCN6750_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_wcn6750;
+ pci_ops = &ath11k_ahb_pci_ops_wcn6750;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
+ return -EOPNOTSUPP;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -685,20 +796,22 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
}
ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
- ATH11K_BUS_AHB,
- &ath11k_ahb_bus_params);
+ ATH11K_BUS_AHB);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
}
- ab->hif.ops = &ath11k_ahb_hif_ops;
+ ab->hif.ops = hif_ops;
+ ab->pci.ops = pci_ops;
ab->pdev = pdev;
- ab->hw_rev = (enum ath11k_hw_rev)of_id->data;
- ab->mem = mem;
- ab->mem_len = resource_size(mem_res);
+ ab->hw_rev = hw_rev;
platform_set_drvdata(pdev, ab);
+ ret = ath11k_ahb_setup_resources(ab);
+ if (ret)
+ goto err_core_free;
+
ret = ath11k_core_pre_init(ab);
if (ret)
goto err_core_free;
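
ath11k_ahb_setup_msi_resources() above maps the platform's MSI target address into the device's IOVA space with dma_map_resource() and then reads the per-vector IRQ numbers from platform resources. A condensed sketch of just the address-mapping step, under the same assumption the hunk makes, namely that resource 0 of the platform device is the MSI doorbell region:

    /* Sketch of mapping an MMIO (MSI doorbell) address for device access. */
    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static int example_map_msi_addr(struct platform_device *pdev, dma_addr_t *iova)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return -ENOENT;

            /* dma_map_resource() maps physical MMIO, not RAM; only the
             * doorbell page is needed here.
             */
            *iova = dma_map_resource(&pdev->dev, res->start, PAGE_SIZE,
                                     DMA_FROM_DEVICE, 0);
            if (dma_mapping_error(&pdev->dev, *iova))
                    return -ENOMEM;

            return 0;
    }
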
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index aaa7b05ff49d..c14c51f38709 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
@@ -918,9 +919,6 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
int i;
int ret;
- ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
- &ab->qmi.ce_cfg.shadow_reg_v2_len);
-
for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 71eb7d04c3bf..01e1d494b527 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -9,6 +9,7 @@
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
+
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
@@ -95,11 +96,20 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
- .wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .dp_window_idx = 0,
+ .ce_window_idx = 0,
+ .fixed_fw_mem = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -161,11 +171,20 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
- .wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .dp_window_idx = 0,
+ .ce_window_idx = 0,
+ .fixed_fw_mem = false,
},
{
.name = "qca6390 hw2.0",
@@ -219,18 +238,27 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
- .supports_regdb = true,
+ .supports_regdb = false,
.fix_l1ss = true,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
- .wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .dp_window_idx = 0,
+ .ce_window_idx = 0,
+ .fixed_fw_mem = false,
},
{
.name = "qcn9074 hw1.0",
@@ -291,11 +319,20 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = true,
.alloc_cacheable_memory = true,
- .wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = false,
+ .dp_window_idx = 3,
+ .ce_window_idx = 2,
+ .fixed_fw_mem = false,
},
{
.name = "wcn6855 hw2.0",
@@ -356,11 +393,20 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
- .wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .dp_window_idx = 0,
+ .ce_window_idx = 0,
+ .fixed_fw_mem = false,
},
{
.name = "wcn6855 hw2.1",
@@ -420,25 +466,119 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
- .wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .dp_window_idx = 0,
+ .ce_window_idx = 0,
+ .fixed_fw_mem = false,
+ },
+ {
+ .name = "wcn6750 hw1.0",
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ .fw = {
+ .dir = "WCN6750/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6750_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = false,
+ .regs = &wcn6750_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .cold_boot_calib = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = true,
+ .dp_window_idx = 1,
+ .ce_window_idx = 2,
+ .fixed_fw_mem = true,
},
};
+static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
+{
+ WARN_ON(!ab->hw_params.single_pdev_only);
+
+ return &ab->pdevs[0];
+}
+
int ath11k_core_suspend(struct ath11k_base *ab)
{
int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
- /* TODO: there can frames in queues so for now add delay as a hack.
- * Need to implement to handle and remove this delay.
+ /* so far single_pdev_only chips have supports_suspend as true
+ * and only the first pdev is valid.
*/
- msleep(500);
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
ret = ath11k_dp_rx_pktlog_stop(ab, true);
if (ret) {
@@ -447,6 +587,12 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_wow_enable(ab);
if (ret) {
ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
@@ -479,10 +625,20 @@ EXPORT_SYMBOL(ath11k_core_suspend);
int ath11k_core_resume(struct ath11k_base *ab)
{
int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
+ /* so far single_pdev_only chips have supports_suspend as true
+ * and only the first pdev is valid.
+ */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
ret = ath11k_hif_resume(ab);
if (ret) {
ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
@@ -509,6 +665,97 @@ int ath11k_core_resume(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_core_resume);
+static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
+{
+ struct ath11k_base *ab = data;
+ const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC;
+ struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr;
+ ssize_t copied;
+ size_t len;
+ int i;
+
+ if (ab->qmi.target.bdf_ext[0] != '\0')
+ return;
+
+ if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE)
+ return;
+
+ if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "wrong smbios bdf ext type length (%d).\n",
+ hdr->length);
+ return;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ switch (smbios->country_code_flag) {
+ case ATH11K_SMBIOS_CC_ISO:
+ ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff;
+ ab->new_alpha2[1] = smbios->cc_code & 0xff;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot smbios cc_code %c%c\n",
+ ab->new_alpha2[0], ab->new_alpha2[1]);
+ break;
+ case ATH11K_SMBIOS_CC_WW:
+ ab->new_alpha2[0] = '0';
+ ab->new_alpha2[1] = '0';
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot smbios worldwide regdomain\n");
+ break;
+ default:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
+ smbios->country_code_flag);
+ break;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!smbios->bdf_enabled) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n");
+ return;
+ }
+
+ /* Only one string exists (per spec) */
+ if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant magic does not match.\n");
+ return;
+ }
+
+ len = min_t(size_t,
+ strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
+ for (i = 0; i < len; i++) {
+ if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant name contains non ascii chars.\n");
+ return;
+ }
+ }
+
+ /* Copy extension name without magic prefix */
+ copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
+ sizeof(ab->qmi.target.bdf_ext));
+ if (copied < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate\n");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+ ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
+}
+
+int ath11k_core_check_smbios(struct ath11k_base *ab)
+{
+ ab->qmi.target.bdf_ext[0] = '\0';
+ dmi_walk(ath11k_core_check_cc_code_bdfext, ab);
+
+ if (ab->qmi.target.bdf_ext[0] == '\0')
+ return -ENODATA;
+
+ return 0;
+}
+
int ath11k_core_check_dt(struct ath11k_base *ab)
{
size_t max_len = sizeof(ab->qmi.target.bdf_ext);
@@ -532,13 +779,13 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
return 0;
}
-static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
- size_t name_len)
+static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len, bool with_variant)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
- if (ab->qmi.target.bdf_ext[0] != '\0')
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
@@ -568,6 +815,18 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
return 0;
}
+static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, true);
+}
+
+static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, false);
+}
+
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
const char *file)
{
@@ -602,7 +861,9 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
struct ath11k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
- int bd_ie_type)
+ int ie_id,
+ int name_id,
+ int data_id)
{
const struct ath11k_fw_ie *hdr;
bool name_match_found;
@@ -612,7 +873,7 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
name_match_found = false;
- /* go through ATH11K_BD_IE_BOARD_ elements */
+ /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath11k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
@@ -623,48 +884,50 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
- ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n",
+ ath11k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath11k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
- switch (board_ie_id) {
- case ATH11K_BD_IE_BOARD_NAME:
+ if (board_ie_id == name_id) {
ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
- break;
+ goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
- break;
+ goto next;
name_match_found = true;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
- "boot found match for name '%s'",
+ "boot found match %s for name '%s'",
+ ath11k_bd_ie_type_str(ie_id),
boardname);
- break;
- case ATH11K_BD_IE_BOARD_DATA:
+ } else if (board_ie_id == data_id) {
if (!name_match_found)
/* no match found */
- break;
+ goto next;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
- "boot found board data for '%s'", boardname);
+ "boot found %s for '%s'",
+ ath11k_bd_ie_type_str(ie_id),
+ boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
- default:
- ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n",
+ } else {
+ ath11k_warn(ab, "unknown %s id found: %d\n",
+ ath11k_bd_ie_type_str(ie_id),
board_ie_id);
- break;
}
-
+next:
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
@@ -681,7 +944,10 @@ out:
static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
struct ath11k_board_data *bd,
- const char *boardname)
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
{
size_t len, magic_len;
const u8 *data;
@@ -746,22 +1012,23 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
goto err;
}
- switch (ie_id) {
- case ATH11K_BD_IE_BOARD:
+ if (ie_id == ie_id_match) {
ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
- ATH11K_BD_IE_BOARD);
+ ie_id_match,
+ name_id,
+ data_id);
if (ret == -ENOENT)
/* no match found, continue */
- break;
+ goto next;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
-
+next:
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
@@ -771,8 +1038,9 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
out:
if (!bd->data || !bd->len) {
- ath11k_err(ab,
- "failed to fetch board data for %s from %s\n",
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath11k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA;
goto err;
@@ -803,24 +1071,52 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
#define BOARD_NAME_SIZE 200
int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
- char boardname[BOARD_NAME_SIZE];
+ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+ char *filename, filepath[100];
int ret;
- ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ filename = ATH11K_BOARD_API2_FILE;
+
+ ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) {
ath11k_err(ab, "failed to create board name: %d", ret);
return ret;
}
ab->bd_api = 2;
- ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname);
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname,
+ sizeof(fallback_boardname));
+ if (ret) {
+ ath11k_err(ab, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
ab->bd_api = 1;
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE);
if (ret) {
- ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath11k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params.fw.dir);
return ret;
}
@@ -832,13 +1128,32 @@ success:
int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
+ char boardname[BOARD_NAME_SIZE];
int ret;
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_REGDB,
+ ATH11K_BD_IE_REGDB_NAME,
+ ATH11K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
if (ret)
ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir);
+exit:
+ if (!ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n");
+
return ret;
}
@@ -952,21 +1267,14 @@ static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
ath11k_debugfs_pdev_destroy(ab);
}
-static int ath11k_core_start(struct ath11k_base *ab,
- enum ath11k_firmware_mode mode)
+static int ath11k_core_start(struct ath11k_base *ab)
{
int ret;
- ret = ath11k_qmi_firmware_start(ab, mode);
- if (ret) {
- ath11k_err(ab, "failed to attach wmi: %d\n", ret);
- return ret;
- }
-
ret = ath11k_wmi_attach(ab);
if (ret) {
ath11k_err(ab, "failed to attach wmi: %d\n", ret);
- goto err_firmware_stop;
+ return ret;
}
ret = ath11k_htc_init(ab);
@@ -1041,7 +1349,7 @@ static int ath11k_core_start(struct ath11k_base *ab,
}
/* put hardware to DBS mode */
- if (ab->hw_params.single_pdev_only) {
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) {
ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
if (ret) {
ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
@@ -1066,8 +1374,23 @@ err_hif_stop:
ath11k_hif_stop(ab);
err_wmi_detach:
ath11k_wmi_detach(ab);
-err_firmware_stop:
- ath11k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
+static int ath11k_core_start_firmware(struct ath11k_base *ab,
+ enum ath11k_firmware_mode mode)
+{
+ int ret;
+
+ ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
+ &ab->qmi.ce_cfg.shadow_reg_v2_len);
+
+ ret = ath11k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath11k_err(ab, "failed to send firmware start: %d\n", ret);
+ return ret;
+ }
return ret;
}
@@ -1097,16 +1420,22 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
+ ret = ath11k_core_start_firmware(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath11k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_ce_init_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to initialize CE: %d\n", ret);
- return ret;
+ goto err_firmware_stop;
}
ret = ath11k_dp_alloc(ab);
if (ret) {
ath11k_err(ab, "failed to init DP: %d\n", ret);
- return ret;
+ goto err_firmware_stop;
}
switch (ath11k_crypto_mode) {
@@ -1127,7 +1456,7 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
mutex_lock(&ab->core_lock);
- ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+ ret = ath11k_core_start(ab);
if (ret) {
ath11k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
@@ -1156,6 +1485,9 @@ err_core_stop:
err_dp_free:
ath11k_dp_free(ab);
mutex_unlock(&ab->core_lock);
+err_firmware_stop:
+ ath11k_qmi_firmware_stop(ab);
+
return ret;
}
@@ -1261,6 +1593,7 @@ static void ath11k_update_11d(struct work_struct *work)
pdev = &ab->pdevs[i];
ar = pdev->ar;
+ memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
@@ -1269,12 +1602,11 @@ static void ath11k_update_11d(struct work_struct *work)
}
}
-static void ath11k_core_restart(struct work_struct *work)
+static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
{
- struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
- int i, ret = 0;
+ int i;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
@@ -1288,6 +1620,7 @@ static void ath11k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw);
ath11k_mac_drain_tx(ar);
+ complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
complete(&ar->scan.completed);
complete(&ar->peer_assoc_done);
@@ -1307,12 +1640,13 @@ static void ath11k_core_restart(struct work_struct *work)
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
+}
- ret = ath11k_core_reconfigure_on_crash(ab);
- if (ret) {
- ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
- return;
- }
+static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
@@ -1348,6 +1682,98 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ab->driver_recovery);
}
+static void ath11k_core_restart(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
+ int ret;
+
+ if (!ab->is_reset)
+ ath11k_core_pre_reconfigure_recovery(ab);
+
+ ret = ath11k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ if (ab->is_reset)
+ complete_all(&ab->reconfigure_complete);
+
+ if (!ab->is_reset)
+ ath11k_core_post_reconfigure_recovery(ab);
+}
+
+static void ath11k_core_reset(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);
+ int reset_count, fail_cont_count;
+ long time_left;
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) {
+ ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
+ return;
+ }
+
+	/* Sometimes a recovery attempt fails and then every subsequent
+	 * attempt fails as well; the checks below bail out to avoid an
+	 * endless stream of recovery attempts that can never succeed.
+	 */
+ fail_cont_count = atomic_read(&ab->fail_cont_count);
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL)
+ return;
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST &&
+ time_before(jiffies, ab->reset_fail_timeout))
+ return;
+
+ reset_count = atomic_inc_return(&ab->reset_count);
+
+ if (reset_count > 1) {
+		/* Sometimes another reset worker is scheduled before the
+		 * previous one has completed; the newer worker would tear
+		 * down state the earlier one is still using, so wait below
+		 * to avoid that.
+		 */
+ ath11k_warn(ab, "already resetting count %d\n", reset_count);
+
+ reinit_completion(&ab->reset_complete);
+ time_left = wait_for_completion_timeout(&ab->reset_complete,
+ ATH11K_RESET_TIMEOUT_HZ);
+
+ if (time_left) {
+			ath11k_dbg(ab, ATH11K_DBG_BOOT, "previous reset completed, skipping reset\n");
+ atomic_dec(&ab->reset_count);
+ return;
+ }
+
+ ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ;
+		/* record another consecutive recovery failure */
+ atomic_inc(&ab->fail_cont_count);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n");
+
+ ab->is_reset = true;
+ atomic_set(&ab->recovery_count, 0);
+ reinit_completion(&ab->recovery_start);
+ atomic_set(&ab->recovery_start_count, 0);
+
+ ath11k_core_pre_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->reconfigure_complete);
+ ath11k_core_post_reconfigure_recovery(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n");
+
+ time_left = wait_for_completion_timeout(&ab->recovery_start,
+ ATH11K_RECOVER_START_TIMEOUT_HZ);
+
+ ath11k_hif_power_down(ab);
+ ath11k_qmi_free_resource(ab);
+ ath11k_hif_power_up(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
+}
+
static int ath11k_init_hw_params(struct ath11k_base *ab)
{
const struct ath11k_hw_params *hw_params = NULL;
@@ -1417,6 +1843,7 @@ EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
+ destroy_workqueue(ab->workqueue_aux);
destroy_workqueue(ab->workqueue);
kfree(ab);
@@ -1424,8 +1851,7 @@ void ath11k_core_free(struct ath11k_base *ab)
EXPORT_SYMBOL(ath11k_core_free);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus,
- const struct ath11k_bus_params *bus_params)
+ enum ath11k_bus bus)
{
struct ath11k_base *ab;
@@ -1439,9 +1865,17 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
if (!ab->workqueue)
goto err_sc_free;
+ ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq");
+ if (!ab->workqueue_aux)
+ goto err_free_wq;
+
mutex_init(&ab->core_lock);
+ mutex_init(&ab->tbl_mtx_lock);
spin_lock_init(&ab->base_lock);
mutex_init(&ab->vdev_id_11d_lock);
+ init_completion(&ab->reset_complete);
+ init_completion(&ab->reconfigure_complete);
+ init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
@@ -1450,16 +1884,18 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work);
+ INIT_WORK(&ab->reset_work, ath11k_core_reset);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
ab->dev = dev;
- ab->bus_params = *bus_params;
ab->hif.bus = bus;
return ab;
+err_free_wq:
+ destroy_workqueue(ab->workqueue);
err_sc_free:
kfree(ab);
return NULL;
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index c0228e91a596..95bca0b078b1 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -10,6 +11,9 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitfield.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
+#include <linux/rhashtable.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -23,6 +27,7 @@
#include "thermal.h"
#include "dbring.h"
#include "spectral.h"
+#include "wow.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -36,9 +41,26 @@
#define ATH11K_INVALID_HW_MAC_ID 0xFF
#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding the trailing string set) */
+#define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* The magic string used by the QCA spec */
+#define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+
extern unsigned int ath11k_frame_mode;
+#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
+
#define ATH11K_MON_TIMER_INTERVAL 10
+#define ATH11K_RESET_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3
+#define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5
+#define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
+#define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
enum ath11k_supported_bw {
ATH11K_BW_20 = 0,
@@ -118,6 +140,7 @@ enum ath11k_hw_rev {
ATH11K_HW_QCN9074_HW10,
ATH11K_HW_WCN6855_HW20,
ATH11K_HW_WCN6855_HW21,
+ ATH11K_HW_WCN6750_HW10,
};
enum ath11k_firmware_mode {
@@ -147,6 +170,39 @@ struct ath11k_ext_irq_grp {
struct net_device napi_ndev;
};
+enum ath11k_smbios_cc_type {
+ /* disable country code setting from SMBIOS */
+ ATH11K_SMBIOS_CC_DISABLE = 0,
+
+	/* set the country code from an ISO 3166-1 alpha-2 country name */
+ ATH11K_SMBIOS_CC_ISO = 1,
+
+ /* worldwide regdomain */
+ ATH11K_SMBIOS_CC_WW = 2,
+};
+
+struct ath11k_smbios_bdf {
+ struct dmi_header hdr;
+
+ u8 features_disabled;
+
+ /* enum ath11k_smbios_cc_type */
+ u8 country_code_flag;
+
+	/* To set a specific country, first set country_code_flag to
+	 * ATH11K_SMBIOS_CC_ISO and then store the ISO 3166-1 alpha-2 code
+	 * as a 16-bit value: for the United States ("US") use 0x5553
+	 * ('U' = 0x55, 'S' = 0x53); for Indonesia ("ID") use 0x4944
+	 * ('I' = 0x49, 'D' = 0x44). If country_code_flag is
+	 * ATH11K_SMBIOS_CC_WW, the worldwide regulatory setting is used.
+	 */
+ u16 cc_code;
+
+ u8 bdf_enabled;
+ u8 bdf_ext[];
+} __packed;
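/* Editor's note (illustrative sketch, not part of this patch): based on the
 * 0x5553 == "US" example above, the cc_code value packs the first alpha-2
 * character into the high byte and the second into the low byte, e.g.:
 *
 *	u16 cc = ((u16)'I' << 8) | 'D';              // 0x4944, Indonesia
 *	char alpha2[3] = { cc >> 8, cc & 0xff, 0 };  // back to "ID"
 *
 * How the raw SMBIOS bytes map onto this numeric value (the endianness of
 * the blob) is not shown here and would need to match the parsing code.
 */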
+
#define HEHANDLE_CAP_PHYINFO_SIZE 3
#define HECAP_PHYINFO_SIZE 9
#define HECAP_MACINFO_SIZE 5
@@ -189,6 +245,12 @@ enum ath11k_scan_state {
ATH11K_SCAN_ABORTING,
};
+enum ath11k_11d_state {
+ ATH11K_11D_IDLE,
+ ATH11K_11D_PREPARING,
+ ATH11K_11D_RUNNING,
+};
+
enum ath11k_dev_flags {
ATH11K_CAC_RUNNING,
ATH11K_FLAG_CORE_REGISTERED,
@@ -204,6 +266,8 @@ enum ath11k_dev_flags {
ATH11K_FLAG_CE_IRQ_ENABLED,
ATH11K_FLAG_EXT_IRQ_ENABLED,
ATH11K_FLAG_FIXED_MEM_RGN,
+ ATH11K_FLAG_DEVICE_INIT_DONE,
+ ATH11K_FLAG_MULTI_MSI_VECTORS,
};
enum ath11k_monitor_flags {
@@ -212,6 +276,30 @@ enum ath11k_monitor_flags {
ATH11K_FLAG_MONITOR_VDEV_CREATED,
};
+#define ATH11K_IPV6_UC_TYPE 0
+#define ATH11K_IPV6_AC_TYPE 1
+
+#define ATH11K_IPV6_MAX_COUNT 16
+#define ATH11K_IPV4_MAX_COUNT 2
+
+struct ath11k_arp_ns_offload {
+ u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[ATH11K_IPV6_MAX_COUNT];
+ bool ipv6_valid[ATH11K_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KCK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
+};
+
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -263,6 +351,9 @@ struct ath11k_vif {
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
+ struct ath11k_arp_ns_offload arp_ns_offload;
+ struct ath11k_rekey_data rekey_data;
+
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_twt;
#endif /* CONFIG_ATH11K_DEBUGFS */
@@ -590,6 +681,9 @@ struct ath11k {
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
+ struct ath11k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
struct ath11k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
@@ -607,12 +701,13 @@ struct ath11k {
bool dfs_block_radar_events;
struct ath11k_thermal thermal;
u32 vdev_id_11d_scan;
- struct completion finish_11d_scan;
- struct completion finish_11d_ch_list;
- bool pending_11d;
+ struct completion completed_11d_scan;
+ enum ath11k_11d_state state_11d;
bool regdom_set_by_user;
int hw_rate_code;
u8 twt_enabled;
+ bool nlo_enabled;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
};
struct ath11k_band_cap {
@@ -654,12 +749,12 @@ struct ath11k_board_data {
size_t len;
};
-struct ath11k_bus_params {
- bool mhi_support;
- bool m3_fw_support;
- bool fixed_bdf_addr;
- bool fixed_mem_region;
- bool static_window_map;
+struct ath11k_pci_ops {
+ int (*wakeup)(struct ath11k_base *ab);
+ void (*release)(struct ath11k_base *ab);
+ int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector);
+ void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value);
+ u32 (*window_read32)(struct ath11k_base *ab, u32 offset);
};
/* IPQ8074 HW channel counters frequency value in hertz */
@@ -703,6 +798,19 @@ struct ath11k_soc_dp_stats {
struct ath11k_dp_ring_bp_stats bp_stats;
};
+struct ath11k_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath11k_msi_config {
+ int total_vectors;
+ int total_users;
+ struct ath11k_msi_user *users;
+ u16 hw_rev;
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath11k_base {
enum ath11k_hw_rev hw_rev;
@@ -747,6 +855,18 @@ struct ath11k_base {
struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
+
+ /* To synchronize rhash tbl write operation */
+ struct mutex tbl_mtx_lock;
+
+ /* The rhashtable containing struct ath11k_peer keyed by mac addr */
+ struct rhashtable *rhead_peer_addr;
+ struct rhashtable_params rhash_peer_addr_param;
+
+ /* The rhashtable containing struct ath11k_peer keyed by id */
+ struct rhashtable *rhead_peer_id;
+ struct rhashtable_params rhash_peer_id_param;
+
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
u8 mac_addr[ETH_ALEN];
@@ -760,7 +880,6 @@ struct ath11k_base {
int bd_api;
struct ath11k_hw_params hw_params;
- struct ath11k_bus_params bus_params;
const struct firmware *cal_file;
@@ -788,6 +907,18 @@ struct ath11k_base {
struct work_struct restart_work;
struct work_struct update_11d_work;
u8 new_alpha2[3];
+ struct workqueue_struct *workqueue_aux;
+ struct work_struct reset_work;
+ atomic_t reset_count;
+ atomic_t recovery_count;
+ atomic_t recovery_start_count;
+ bool is_reset;
+ struct completion reset_complete;
+ struct completion reconfigure_complete;
+ struct completion recovery_start;
+ /* continuous recovery fail count */
+ atomic_t fail_cont_count;
+ unsigned long reset_fail_timeout;
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@@ -815,6 +946,18 @@ struct ath11k_base {
u32 subsystem_device;
} id;
+ struct {
+ struct {
+ const struct ath11k_msi_config *config;
+ u32 ep_base_data;
+ u32 irqs[32];
+ u32 addr_lo;
+ u32 addr_hi;
+ } msi;
+
+ const struct ath11k_pci_ops *ops;
+ } pci;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -985,8 +1128,7 @@ int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
void ath11k_core_deinit(struct ath11k_base *ath11k);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus,
- const struct ath11k_bus_params *bus_params);
+ enum ath11k_bus bus);
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
@@ -996,7 +1138,7 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
const char *name);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
-
+int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index a82266c8befc..9648e0017393 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -596,6 +596,10 @@ static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
ret = ath11k_wmi_force_fw_hang_cmd(ar,
ATH11K_WMI_FW_HANG_ASSERT_TYPE,
ATH11K_WMI_FW_HANG_DELAY);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath11k_info(ab, "user requested hw restart\n");
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ ret = 0;
} else {
ret = -EINVAL;
goto exit;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 2ec09ae90080..1dba7b9e0bda 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -1082,10 +1083,10 @@ static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
srng = &hal->srng_list[ring_id];
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
- srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
else
- srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
}
@@ -1120,7 +1121,7 @@ int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11k_DBG_HAL,
"target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
target_reg,
- HAL_SHADOW_REG(shadow_cfg_idx),
+ HAL_SHADOW_REG(ab, shadow_cfg_idx),
shadow_cfg_idx,
ring_type, ring_num);
@@ -1193,12 +1194,12 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
s = &hal->srng_config[HAL_REO_REINJECT];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
s = &hal->srng_config[HAL_REO_CMD];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
s = &hal->srng_config[HAL_REO_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index a7d9b4c551ad..1aadb1566df8 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
@@ -31,12 +32,12 @@ struct ath11k_base;
#define HAL_DSCP_TID_TBL_SIZE 24
/* calculate the register address from bar0 of shadow register x */
-#define HAL_SHADOW_BASE_ADDR 0x000008fc
+#define HAL_SHADOW_BASE_ADDR(ab) ab->hw_params.regs->hal_shadow_base_addr
#define HAL_SHADOW_NUM_REGS 36
#define HAL_HP_OFFSET_IN_REG_START 1
#define HAL_OFFSET_FROM_HP_TO_TP 4
-#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
+#define HAL_SHADOW_REG(ab, x) (HAL_SHADOW_BASE_ADDR(ab) + (4 * (x)))
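/* Editor's note (worked example, not part of this patch): with the
 * qca6390/wcn6855 value hal_shadow_base_addr = 0x000008fc set elsewhere in
 * this patch, shadow register 3 resolves to
 * HAL_SHADOW_REG(ab, 3) = 0x8fc + 4 * 3 = 0x908, i.e. each shadow register
 * occupies 4 bytes starting at the per-chip base address.
 */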
/* WCSS Relative address */
#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
@@ -180,16 +181,18 @@ struct ath11k_base;
#define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp
/* REO CMD R0 address */
-#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
+#define HAL_REO_CMD_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_cmd_ring_base_lsb
/* REO CMD R2 address */
-#define HAL_REO_CMD_HP 0x00003020
+#define HAL_REO_CMD_HP(ab) ab->hw_params.regs->hal_reo_cmd_ring_hp
/* SW2REO R0 address */
-#define HAL_SW2REO_RING_BASE_LSB 0x000001ec
+#define HAL_SW2REO_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_sw2reo_ring_base_lsb
/* SW2REO R2 address */
-#define HAL_SW2REO_RING_HP 0x00003028
+#define HAL_SW2REO_RING_HP(ab) ab->hw_params.regs->hal_sw2reo_ring_hp
/* CE ring R0 address */
#define HAL_CE_DST_RING_BASE_LSB 0x00000000
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 6913b7494b9b..069c29a4fac7 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -272,6 +272,11 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
ep_tx_complete(htc->ab, skb);
}
+static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab)
+{
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot wakeup from suspend is received\n");
+}
+
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
@@ -376,6 +381,7 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
ath11k_htc_suspend_complete(ab, false);
break;
case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath11k_htc_wakeup_from_suspend(ab);
break;
default:
ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n",
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index d1b0e76d9ec2..09ce357f0f0d 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -1014,6 +1015,45 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
};
+const struct ath11k_hw_ops wcn6750_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+};
+
#define ATH11K_TX_RING_MASK_0 0x1
#define ATH11K_TX_RING_MASK_1 0x2
#define ATH11K_TX_RING_MASK_2 0x4
@@ -1908,10 +1948,18 @@ const struct ath11k_hw_regs ipq8074_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
@@ -1932,6 +1980,9 @@ const struct ath11k_hw_regs ipq8074_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x0,
.pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
};
const struct ath11k_hw_regs qca6390_regs = {
@@ -1979,10 +2030,18 @@ const struct ath11k_hw_regs qca6390_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003a4,
.hal_reo_tcl_ring_hp = 0x00003050,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x000004ac,
.hal_reo_status_hp = 0x00003068,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
@@ -2003,6 +2062,9 @@ const struct ath11k_hw_regs qca6390_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
};
const struct ath11k_hw_regs qcn9074_regs = {
@@ -2050,10 +2112,18 @@ const struct ath11k_hw_regs qcn9074_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
@@ -2074,6 +2144,9 @@ const struct ath11k_hw_regs qcn9074_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
};
const struct ath11k_hw_regs wcn6855_regs = {
@@ -2121,10 +2194,18 @@ const struct ath11k_hw_regs wcn6855_regs = {
.hal_reo_tcl_ring_base_lsb = 0x00000454,
.hal_reo_tcl_ring_hp = 0x00003060,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x0000055c,
.hal_reo_status_hp = 0x00003078,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
@@ -2145,6 +2226,91 @@ const struct ath11k_hw_regs wcn6855_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
+};
+
+const struct ath11k_hw_regs wcn6750_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x0,
+ .pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x00000504,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
@@ -2154,3 +2320,23 @@ const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
};
+
+static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
+ {.start_freq = 2402, .end_freq = 2482 }, /* 2G ch1~ch13 */
+ {.start_freq = 5150, .end_freq = 5250 }, /* 5G UNII-1 ch32~ch48 */
+ {.start_freq = 5250, .end_freq = 5725 }, /* 5G UNII-2 ch50~ch144 */
+ {.start_freq = 5725, .end_freq = 5810 }, /* 5G UNII-3 ch149~ch161 */
+ {.start_freq = 5815, .end_freq = 5895 }, /* 5G UNII-4 ch163~ch177 */
+ {.start_freq = 5925, .end_freq = 6165 }, /* 6G UNII-5 Ch1, Ch2 ~ Ch41 */
+ {.start_freq = 6165, .end_freq = 6425 }, /* 6G UNII-5 ch45~ch93 */
+ {.start_freq = 6425, .end_freq = 6525 }, /* 6G UNII-6 ch97~ch113 */
+ {.start_freq = 6525, .end_freq = 6705 }, /* 6G UNII-7 ch117~ch149 */
+ {.start_freq = 6705, .end_freq = 6875 }, /* 6G UNII-7 ch153~ch185 */
+ {.start_freq = 6875, .end_freq = 7125 }, /* 6G UNII-8 ch189~ch233 */
+};
+
+const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855 = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = (ARRAY_SIZE(ath11k_hw_sar_freq_ranges_wcn6855)),
+ .freq_ranges = ath11k_hw_sar_freq_ranges_wcn6855,
+};
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 27ca4a9c20fc..6d588cd80093 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HW_H
@@ -189,11 +190,20 @@ struct ath11k_hw_params {
const struct ath11k_hw_hal_params *hal_params;
bool supports_dynamic_smps_6ghz;
bool alloc_cacheable_memory;
- bool wakeup_mhi;
bool supports_rssi_stats;
bool fw_wmi_diag_event;
bool current_cc_support;
bool dbr_debug_support;
+ bool global_reset;
+ const struct cfg80211_sar_capa *bios_sar_capa;
+ bool m3_fw_support;
+ bool fixed_bdf_addr;
+ bool fixed_mem_region;
+ bool static_window_map;
+ bool hybrid_bus_type;
+ u8 dp_window_idx;
+ u8 ce_window_idx;
+ bool fixed_fw_mem;
};
struct ath11k_hw_ops {
@@ -243,6 +253,7 @@ extern const struct ath11k_hw_ops ipq6018_ops;
extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
extern const struct ath11k_hw_ops wcn6855_ops;
+extern const struct ath11k_hw_ops wcn6750_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
@@ -290,10 +301,16 @@ enum ath11k_bd_ie_board_type {
ATH11K_BD_IE_BOARD_DATA = 1,
};
+enum ath11k_bd_ie_regdb_type {
+ ATH11K_BD_IE_REGDB_NAME = 0,
+ ATH11K_BD_IE_REGDB_DATA = 1,
+};
+
enum ath11k_bd_ie_type {
/* contains sub IEs of enum ath11k_bd_ie_board_type */
ATH11K_BD_IE_BOARD = 0,
- ATH11K_BD_IE_BOARD_EXT = 1,
+ /* contains sub IEs of enum ath11k_bd_ie_regdb_type */
+ ATH11K_BD_IE_REGDB = 1,
};
struct ath11k_hw_regs {
@@ -339,6 +356,12 @@ struct ath11k_hw_regs {
u32 hal_reo_status_ring_base_lsb;
u32 hal_reo_status_hp;
+ u32 hal_reo_cmd_ring_base_lsb;
+ u32 hal_reo_cmd_ring_hp;
+
+ u32 hal_sw2reo_ring_base_lsb;
+ u32 hal_sw2reo_ring_hp;
+
u32 hal_seq_wcss_umac_ce0_src_reg;
u32 hal_seq_wcss_umac_ce0_dst_reg;
u32 hal_seq_wcss_umac_ce1_src_reg;
@@ -354,11 +377,27 @@ struct ath11k_hw_regs {
u32 pcie_qserdes_sysclk_en_sel;
u32 pcie_pcs_osc_dtct_config_base;
+
+ u32 hal_shadow_base_addr;
};
extern const struct ath11k_hw_regs ipq8074_regs;
extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
extern const struct ath11k_hw_regs wcn6855_regs;
+extern const struct ath11k_hw_regs wcn6750_regs;
+
+static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH11K_BD_IE_BOARD:
+ return "board data";
+ case ATH11K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
+extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855;
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e6b34b0d61bd..1957e1713548 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
+#include <linux/inetdevice.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
#include "mac.h"
#include "core.h"
#include "debug.h"
@@ -16,6 +21,8 @@
#include "testmode.h"
#include "peer.h"
#include "debugfs_sta.h"
+#include "hif.h"
+#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -868,13 +875,16 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
lockdep_assert_held(&ar->conf_mutex);
+ mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
ath11k_peer_rx_tid_cleanup(ar, peer);
+ ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list);
kfree(peer);
}
spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
ar->num_peers = 0;
ar->num_stations = 0;
@@ -1626,7 +1636,7 @@ static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
band = def.chan->band;
sband = ar->hw->wiphy->bands[band];
- ratemask = sta->supp_rates[band];
+ ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -1671,7 +1681,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
@@ -1708,7 +1718,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
@@ -1766,7 +1776,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1868,7 +1878,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
@@ -1914,17 +1924,17 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
- if (vht_nss > sta->rx_nss) {
+ if (vht_nss > sta->deflink.rx_nss) {
user_rate_valid = false;
- for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
if (vht_mcs_mask[nss_idx]) {
user_rate_valid = true;
break;
@@ -1934,8 +1944,8 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
if (!user_rate_valid) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting vht range mcs value to peer supported nss %d for peer %pM\n",
- sta->rx_nss, sta->addr);
- vht_mcs_mask[sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ sta->deflink.rx_nss, sta->addr);
+ vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
}
/* Calculate peer NSS capability from VHT capabilities if STA
@@ -1949,7 +1959,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -2068,7 +2078,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
{
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
enum nl80211_band band;
u16 *he_mcs_mask;
u8 max_nss, he_mcs;
@@ -2125,7 +2135,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
else
max_nss = rx_mcs_80;
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
memcpy_and_pad(&arg->peer_he_cap_macinfo,
sizeof(arg->peer_he_cap_macinfo),
@@ -2157,10 +2167,10 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
@@ -2203,9 +2213,9 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
- if (he_nss > sta->rx_nss) {
+ if (he_nss > sta->deflink.rx_nss) {
user_rate_valid = false;
- for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
if (he_mcs_mask[nss_idx]) {
user_rate_valid = true;
break;
@@ -2215,11 +2225,11 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
if (!user_rate_valid) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting he range mcs value to peer supported nss %d for peer %pM\n",
- sta->rx_nss, sta->addr);
- he_mcs_mask[sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ sta->deflink.rx_nss, sta->addr);
+ he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1];
}
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
@@ -2273,7 +2283,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
he_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
if (arg->peer_phymode == MODE_11AX_HE160 ||
arg->peer_phymode == MODE_11AX_HE80_80) {
@@ -2306,7 +2316,7 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 ampdu_factor;
@@ -2316,19 +2326,19 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
band = def.chan->band;
- if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
return;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
arg->bw_40 = true;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
- arg->peer_he_caps_6ghz = le16_to_cpu(sta->he_6ghz_capa.capa);
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
arg->peer_mpdu_density =
ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
arg->peer_he_caps_6ghz));
@@ -2354,17 +2364,17 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int smps;
- if (!ht_cap->ht_supported && !sta->he_6ghz_capa.capa)
+ if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa)
return;
if (ht_cap->ht_supported) {
smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
} else {
- smps = le16_get_bits(sta->he_6ghz_capa.capa,
+ smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_SM_PS);
}
@@ -2488,15 +2498,15 @@ err:
static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
ATH11K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
struct ieee80211_sta *sta)
{
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->vht_cap.cap &
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
@@ -2508,13 +2518,13 @@ static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
}
}
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
@@ -2523,24 +2533,24 @@ static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
struct ieee80211_sta *sta)
{
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
- else if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
@@ -2569,23 +2579,23 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->he_cap.has_he &&
+ if (sta->deflink.he_cap.has_he &&
!ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
- else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
- } else if (sta->vht_cap.vht_supported &&
- !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
@@ -2598,15 +2608,15 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check HE first */
- if (sta->he_cap.has_he &&
+ if (sta->deflink.he_cap.has_he &&
!ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
phymode = ath11k_mac_get_phymode_he(ar, sta);
- } else if (sta->vht_cap.vht_supported &&
- !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
phymode = ath11k_mac_get_phymode_vht(ar, sta);
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -2729,8 +2739,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
}
ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->ht_cap,
- le16_to_cpu(ap_sta->he_6ghz_capa.capa));
+ &ap_sta->deflink.ht_cap,
+ le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -2750,6 +2760,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
}
arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
@@ -2807,6 +2818,8 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
arvif->is_up = false;
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
cancel_delayed_work_sync(&arvif->connection_loss_work);
}
@@ -3093,6 +3106,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
int ret = 0;
u8 rateidx;
u32 rate;
+ u32 ipv4_cnt;
mutex_lock(&ar->conf_mutex);
@@ -3391,6 +3405,18 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath11k_mac_fils_discovery(arvif, info);
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ ipv4_cnt = min(info->arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
+ memcpy(arvif->arp_ns_offload.ipv4_addr, info->arp_addr_list,
+ ipv4_cnt * sizeof(u32));
+ memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN);
+ arvif->arp_ns_offload.ipv4_count = ipv4_cnt;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ info->arp_addr_cnt,
+ vif->addr, arvif->arp_ns_offload.ipv4_addr);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3601,26 +3627,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
- /* Currently the pending_11d=true only happened 1 time while
- * wlan interface up in ath11k_mac_11d_scan_start(), it is called by
- * ath11k_mac_op_add_interface(), after wlan interface up,
- * pending_11d=false always.
- * If remove below wait, it always happened scan fail and lead connect
- * fail while wlan interface up, because it has a 11d scan which is running
- * in firmware, and lead this scan failed.
- */
- if (ar->pending_11d) {
- long time_left;
- unsigned long timeout = 5 * HZ;
-
- if (ar->supports_6ghz)
- timeout += 5 * HZ;
-
- time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac wait 11d channel list time left %ld\n", time_left);
- }
-
memset(&arg, 0, sizeof(arg));
ath11k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
@@ -3686,6 +3692,10 @@ exit:
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
return ret;
}
@@ -3997,7 +4007,7 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
}
/* Avoid updating invalid nss as fixed rate*/
- if (nss > sta->rx_nss)
+ if (nss > sta->deflink.rx_nss)
return -EINVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
@@ -4047,7 +4057,7 @@ ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
}
/* Avoid updating invalid nss as fixed rate */
- if (nss > sta->rx_nss)
+ if (nss > sta->deflink.rx_nss)
return -EINVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
@@ -4114,12 +4124,12 @@ static int ath11k_station_assoc(struct ath11k *ar,
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
- if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
if (ret)
return ret;
- } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
band);
if (ret)
@@ -4133,7 +4143,8 @@ static int ath11k_station_assoc(struct ath11k *ar,
return 0;
ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->ht_cap, le16_to_cpu(sta->he_6ghz_capa.capa));
+ &sta->deflink.ht_cap,
+ le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -4295,10 +4306,10 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
* TODO: Check RATEMASK_CMDID to support auto rates selection
* across HT/VHT and for multiple VHT MCS support.
*/
- if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
- } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
band);
} else {
@@ -4537,6 +4548,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
}
ath11k_mac_dec_num_stations(arvif, sta);
+ mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (skip_peer_delete && peer) {
@@ -4544,12 +4556,14 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
} else if (peer && peer->sta == sta) {
ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
peer->sta = NULL;
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@@ -4617,10 +4631,10 @@ static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
int ret = 0;
s16 txpwr;
- if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
} else {
- txpwr = sta->txpwr.power;
+ txpwr = sta->deflink.txpwr.power;
if (!txpwr)
return -EINVAL;
}
@@ -4681,7 +4695,8 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->addr, changed, sta->deflink.bandwidth,
+ sta->deflink.rx_nss,
sta->smps_mode);
spin_lock_bh(&ar->data_lock);
@@ -4689,7 +4704,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = WMI_PEER_CHWIDTH_20MHZ;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_20:
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
@@ -4704,7 +4719,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
break;
default:
ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
- sta->bandwidth, sta->addr);
+ sta->deflink.bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
@@ -4713,7 +4728,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->rx_nss;
+ arsta->nss = sta->deflink.rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
@@ -5585,7 +5600,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work);
+ queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work);
return 0;
}
@@ -5732,6 +5747,27 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
return ret;
}
+static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab)
+{
+ int recovery_start_count;
+
+ if (!ab->is_reset)
+ return;
+
+ recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+
+ if (recovery_start_count == ab->num_radios) {
+ complete(&ab->recovery_start);
+		ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started successfully\n");
+ }
+
+	ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting for reconfigure...\n");
+
+ wait_for_completion_timeout(&ab->reconfigure_complete,
+ ATH11K_RECONFIGURE_TIMEOUT_HZ);
+}
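
The new ath11k_mac_wait_reconfigure() is a per-radio rendezvous: during a full-chip reset every radio increments recovery_start_count, the last one to arrive completes recovery_start, and each radio then waits (bounded by ATH11K_RECONFIGURE_TIMEOUT_HZ) for the core to finish reconfiguration. A minimal sketch of the same pattern, using hypothetical names rather than the driver's structures:

	#include <linux/atomic.h>
	#include <linux/completion.h>
	#include <linux/jiffies.h>

	struct example_soc {			/* hypothetical container */
		atomic_t arrived;
		int num_radios;
		struct completion all_arrived;	/* signalled by the last radio */
		struct completion reconfigured;	/* signalled by the reset driver */
	};

	static void example_radio_rendezvous(struct example_soc *soc)
	{
		/* the last radio to arrive wakes whoever drives the reset */
		if (atomic_inc_return(&soc->arrived) == soc->num_radios)
			complete(&soc->all_arrived);

		/* then every radio waits for reconfiguration, with a timeout */
		wait_for_completion_timeout(&soc->reconfigured, 10 * HZ);
	}
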
+
static int ath11k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath11k *ar = hw->priv;
@@ -5748,6 +5784,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
break;
case ATH11K_STATE_RESTARTING:
ar->state = ATH11K_STATE_RESTARTED;
+ ath11k_mac_wait_reconfigure(ab);
break;
case ATH11K_STATE_RESTARTED:
case ATH11K_STATE_WEDGED:
@@ -5814,7 +5851,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
/* TODO: Do we need to enable ANI? */
- ath11k_reg_update_chan_list(ar);
+ ath11k_reg_update_chan_list(ar, false);
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
@@ -5881,6 +5918,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&ar->ab->update_11d_work);
cancel_work_sync(&ar->ab->rfkill_work);
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
@@ -6051,7 +6093,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
return false;
}
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
{
struct wmi_11d_scan_start_params param;
int ret;
@@ -6079,28 +6121,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
- if (wait)
- reinit_completion(&ar->finish_11d_scan);
-
ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
vdev_id, ret);
} else {
ar->vdev_id_11d_scan = vdev_id;
- if (wait) {
- ar->pending_11d = true;
- ret = wait_for_completion_timeout(&ar->finish_11d_scan,
- 5 * HZ);
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac 11d scan left time %d\n", ret);
-
- if (!ret)
- ar->pending_11d = false;
- }
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ar->state_11d = ATH11K_11D_RUNNING;
}
fin:
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
@@ -6123,12 +6159,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
vdev_id = ar->vdev_id_11d_scan;
ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
- if (ret)
+ if (ret) {
ath11k_warn(ar->ab,
"failed to stopt 11d scan vdev %d ret: %d\n",
vdev_id, ret);
- else
+ } else {
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
}
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
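
Taken together, the 11d hunks above replace the old wait/pending flags with a small state machine: adding a STA interface marks the scan ATH11K_11D_PREPARING (when the firmware advertises 11d offload), a successful WMI scan-start command moves it to ATH11K_11D_RUNNING, and both the stop path and ath11k_mac_op_stop() return it to ATH11K_11D_IDLE and complete completed_11d_scan so that any waiter is released. The enum itself is defined elsewhere in the driver; a hypothetical reconstruction of the states and transitions implied here:

	/* State names as used in the hunks above; the real definition lives
	 * outside this file and may differ.
	 */
	enum example_11d_state {
		ATH11K_11D_IDLE,	/* no 11d scan outstanding */
		ATH11K_11D_PREPARING,	/* STA vdev created, scan not yet started */
		ATH11K_11D_RUNNING,	/* WMI 11d scan start accepted by firmware */
	};

	/* Transitions:
	 *   add_interface (STA, 11d offload)  IDLE      -> PREPARING
	 *   11d scan start succeeds           PREPARING -> RUNNING
	 *   scan stop / mac_op_stop           any       -> IDLE, complete()
	 */
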
@@ -6324,8 +6363,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
-
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH11K_11D_PREPARING;
+ }
break;
case WMI_VDEV_TYPE_MONITOR:
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
@@ -6370,22 +6411,12 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- reinit_completion(&ar->peer_delete_done);
-
- fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
- arvif->vdev_id);
+ fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (fbret) {
- ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- arvif->vdev_id, vif->addr);
+ ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n",
+ vif->addr, arvif->vdev_id, fbret);
goto err;
}
-
- fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
- vif->addr);
- if (fbret)
- goto err;
-
- ar->num_peers--;
}
err_vdev_del:
@@ -7126,6 +7157,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k_peer *peer;
int ret;
mutex_lock(&ar->conf_mutex);
@@ -7137,9 +7169,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started);
if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath11k_peer_find_by_addr(ab, ar->mac_addr))
- ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ if (peer)
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_stop(ar);
@@ -7190,7 +7226,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
}
@@ -7264,31 +7300,47 @@ static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+static int ath11k_mac_flush_tx_complete(struct ath11k *ar)
{
- struct ath11k *ar = hw->priv;
long time_left;
-
- if (drop)
- return;
+ int ret = 0;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH11K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n",
+ atomic_read(&ar->dp.num_tx_pending));
+ ret = -ETIMEDOUT;
+ }
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH11K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath11k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
- time_left);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
+ ret = -ETIMEDOUT;
+ }
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac mgmt tx flush mgmt pending %d\n",
- atomic_read(&ar->num_pending_mgmt_tx));
+ return ret;
+}
+
+int ath11k_mac_wait_tx_complete(struct ath11k *ar)
+{
+ ath11k_mac_drain_tx(ar);
+ return ath11k_mac_flush_tx_complete(ar);
+}
+
+static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (drop)
+ return;
+
+ ath11k_mac_flush_tx_complete(ar);
}
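
Splitting the flush logic out and exporting ath11k_mac_wait_tx_complete() (declared in the mac.h hunk further down) lets code outside mac.c drain and then flush all pending data and management frames before quiescing the device; the most plausible caller is the WoW suspend path added elsewhere in this series. A hedged usage sketch with a hypothetical caller:

	/* Hypothetical caller: drain and flush all pending tx before suspend. */
	static int example_suspend_prepare(struct ath11k *ar)
	{
		int ret;

		ret = ath11k_mac_wait_tx_complete(ar);
		if (ret)
			ath11k_warn(ar->ab, "failed to drain tx before suspend: %d\n", ret);

		return ret;
	}
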
static int
@@ -7711,13 +7763,13 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b
spin_lock_bh(&ar->ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
if (peer->sta) {
- if (vht_fixed_rate && (!peer->sta->vht_cap.vht_supported ||
- peer->sta->rx_nss < vht_nss)) {
+ if (vht_fixed_rate && (!peer->sta->deflink.vht_cap.vht_supported ||
+ peer->sta->deflink.rx_nss < vht_nss)) {
ret = false;
goto out;
}
- if (he_fixed_rate && (!peer->sta->he_cap.has_he ||
- peer->sta->rx_nss < he_nss)) {
+ if (he_fixed_rate && (!peer->sta->deflink.he_cap.has_he ||
+ peer->sta->deflink.rx_nss < he_nss)) {
ret = false;
goto out;
}
@@ -7887,6 +7939,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
@@ -7898,6 +7952,30 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ar->pdev->pdev_id);
ar->state = ATH11K_STATE_ON;
ieee80211_wake_queues(ar->hw);
+
+ if (ar->ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ar->alpha2, 2);
+ ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ }
+
+ if (ab->is_reset) {
+ recovery_count = atomic_inc_return(&ab->recovery_count);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "recovery count %d\n", recovery_count);
+ /* When there are multiple radios in an SOC,
+ * the recovery has to be done for each radio
+ */
+ if (recovery_count == ab->num_radios) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
+ }
+ }
}
mutex_unlock(&ar->conf_mutex);
@@ -8075,6 +8153,198 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
}
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void ath11k_generate_ns_mc_addr(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload)
+{
+ int i;
+
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
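
ath11k_generate_ns_mc_addr() builds, for every configured unicast or anycast address, the solicited-node multicast address defined by RFC 4291: the fixed prefix ff02::1:ff00:0/104 combined with the low 24 bits of the address, which is what the firmware must listen on to answer Neighbor Solicitations while the host sleeps. The same derivation as a standalone sketch (buffer names are hypothetical):

	#include <stdint.h>
	#include <string.h>

	/* Derive the solicited-node multicast address ff02::1:ffXX:XXXX from a
	 * 16-byte IPv6 address.
	 */
	static void solicited_node_mcast(uint8_t mc[16], const uint8_t addr[16])
	{
		memset(mc, 0, 16);
		mc[0] = 0xff;			/* ff02::/16, link-local scope */
		mc[1] = 0x02;
		mc[11] = 0x01;			/* ...:1:ff00:0/104 prefix */
		mc[12] = 0xff;
		memcpy(&mc[13], &addr[13], 3);	/* low 24 bits of the address */
	}
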
+
+static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_arp_ns_offload *offload;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ struct list_head *p;
+ u32 count, scope;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac op ipv6 changed\n");
+
+ offload = &arvif->arp_ns_offload;
+ count = 0;
+
+ read_lock_bh(&idev->lock);
+
+ memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
+ memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr));
+ memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
+
+ /* get unicast address */
+ list_for_each(p, &idev->addr_list) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope);
+ }
+ }
+
+ /* get anycast address */
+ for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+			ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope);
+ }
+ }
+
+generate:
+ offload->ipv6_count = count;
+ read_unlock_bh(&idev->lock);
+
+ /* generate ns multicast address */
+ ath11k_generate_ns_mc_addr(ar, offload);
+}
+#endif
+
+static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant works on big-endian, the firmware expects it on
+ * little endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL,
+			rekey_data->kek, NL80211_KEK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
+
+ mutex_unlock(&ar->conf_mutex);
+}
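
The endianness comment is the key detail in ath11k_mac_op_set_rekey_data(): the supplicant hands over the GTK replay counter as an 8-byte big-endian blob, and get_unaligned_be64() assembles it into a host-order 64-bit value for later use. A standalone illustration of that assembly (not driver code):

	#include <stdint.h>

	/* Equivalent of a big-endian 64-bit load: most significant byte first. */
	static uint64_t be64_from_bytes(const uint8_t b[8])
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 8; i++)
			v = (v << 8) | b[i];
		return v;	/* e.g. 00 00 00 00 00 00 00 2a -> 0x2a */
	}
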
+
+static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct ath11k *ar = hw->priv;
+ const struct cfg80211_sar_sub_specs *sspec = sar->sub_specs;
+ int ret, index;
+ u8 *sar_tbl;
+ u32 i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) ||
+ !ar->ab->hw_params.bios_sar_capa) {
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
+ sar->num_sub_specs == 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret);
+ goto exit;
+ }
+
+ sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL);
+ if (!sar_tbl) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) {
+ ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n",
+ sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1);
+ continue;
+ }
+
+ /* chain0 and chain1 share same power setting */
+ sar_tbl[sspec->freq_range_index] = sspec->power;
+ index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1);
+ sar_tbl[index] = sspec->power;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n",
+ sspec->freq_range_index, sar_tbl[sspec->freq_range_index]);
+ sspec++;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sar power: %d", ret);
+
+ kfree(sar_tbl);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
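
The BIOS SAR table written above is two equal halves: chain 0 power limits occupy the first BIOS_SAR_TABLE_LEN / 2 bytes and chain 1 the second half, indexed by the same frequency-range index, which is why each sub-spec is written twice. A small sketch of that indexing (the table length here is a made-up example, not the driver's value):

	#include <stdint.h>

	#define EXAMPLE_SAR_TABLE_LEN	20	/* hypothetical; chain0 + chain1 halves */

	static void example_fill_sar(uint8_t *tbl, unsigned int freq_idx, uint8_t power)
	{
		unsigned int half = EXAMPLE_SAR_TABLE_LEN >> 1;

		if (freq_idx >= half)
			return;			/* bad index is skipped, as above */

		tbl[freq_idx] = power;		/* chain 0 */
		tbl[freq_idx + half] = power;	/* chain 1 shares the same limit */
	}
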
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.start = ath11k_mac_op_start,
@@ -8089,6 +8359,7 @@ static const struct ieee80211_ops ath11k_ops = {
.hw_scan = ath11k_mac_op_hw_scan,
.cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
.set_key = ath11k_mac_op_set_key,
+ .set_rekey_data = ath11k_mac_op_set_rekey_data,
.sta_state = ath11k_mac_op_sta_state,
.sta_set_4addr = ath11k_mac_op_sta_set_4addr,
.sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
@@ -8110,9 +8381,22 @@ static const struct ieee80211_ops ath11k_ops = {
.flush = ath11k_mac_op_flush,
.sta_statistics = ath11k_mac_op_sta_statistics,
CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
+
+#ifdef CONFIG_PM
+ .suspend = ath11k_wow_op_suspend,
+ .resume = ath11k_wow_op_resume,
+ .set_wakeup = ath11k_wow_op_set_wakeup,
+#endif
+
#ifdef CONFIG_ATH11K_DEBUGFS
.sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = ath11k_mac_op_ipv6_changed,
+#endif
+
+ .set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
};
static void ath11k_mac_update_ch_list(struct ath11k *ar,
@@ -8365,6 +8649,8 @@ void ath11k_mac_unregister(struct ath11k_base *ab)
__ath11k_mac_unregister(ar);
}
+
+ ath11k_peer_rhash_tbl_destroy(ab);
}
static int __ath11k_mac_register(struct ath11k *ar)
@@ -8479,6 +8765,24 @@ static int __ath11k_mac_register(struct ath11k *ar)
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ ar->hw->wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ ar->hw->wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath11k_wow_init(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
@@ -8489,9 +8793,12 @@ static int __ath11k_mac_register(struct ath11k *ar)
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
- if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD,
+ ar->ab->wmi_ab.svc_map)) {
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_BSS_COLOR);
+ ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION);
+ }
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -8515,6 +8822,10 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
}
+ if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ ab->hw_params.bios_sar_capa)
+ ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa;
+
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
@@ -8536,6 +8847,17 @@ static int __ath11k_mac_register(struct ath11k *ar)
goto err_unregister_hw;
}
+ if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ab->new_alpha2, 2);
+ memcpy(&ar->alpha2, ab->new_alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+				"failed to set cc code for mac register: %d\n", ret);
+ }
+
ret = ath11k_debugfs_register(ar);
if (ret) {
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
@@ -8575,6 +8897,10 @@ int ath11k_mac_register(struct ath11k_base *ab)
ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+ ret = ath11k_peer_rhash_tbl_init(ab);
+ if (ret)
+ return ret;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
@@ -8604,6 +8930,8 @@ err_cleanup:
__ath11k_mac_unregister(ar);
}
+ ath11k_peer_rhash_tbl_destroy(ab);
+
return ret;
}
@@ -8671,8 +8999,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
- init_completion(&ar->finish_11d_scan);
- init_completion(&ar->finish_11d_ch_list);
+ init_completion(&ar->completed_11d_scan);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0e6c870b09c8..7f93e3a9ca23 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
#define ATH11K_SCAN_11D_INTERVAL 600000
#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_11d_scan_stop(struct ath11k *ar);
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
@@ -172,4 +172,5 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
+int ath11k_mac_wait_tx_complete(struct ath11k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index fc3524e83e52..c44df17719f6 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
-/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#include <linux/msi.h>
#include <linux/pci.h>
@@ -11,6 +14,7 @@
#include "debug.h"
#include "mhi.h"
#include "pci.h"
+#include "pcic.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define RDDM_DUMP_SIZE 0x420000
@@ -205,7 +209,7 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
{
u32 val;
- val = ath11k_pci_read32(ab, MHISTATUS);
+ val = ath11k_pcic_read32(ab, MHISTATUS);
ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val);
@@ -213,29 +217,29 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
* has SYSERR bit set and thus need to set MHICTRL_RESET
* to clear SYSERR.
*/
- ath11k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+ ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
mdelay(10);
}
static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_TXVECDB, 0);
+ ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
}
static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
+ ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
}
static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_RXVECDB, 0);
+ ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
}
static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
+ ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
}
void ath11k_mhi_clear_vector(struct ath11k_base *ab)
@@ -254,9 +258,8 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
int *irq;
unsigned int msi_data;
- ret = ath11k_pci_get_user_msi_assignment(ab_pci,
- "MHI", &num_vectors,
- &user_base_data, &base_vector);
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
+ &user_base_data, &base_vector);
if (ret)
return ret;
@@ -270,11 +273,10 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
for (i = 0; i < num_vectors; i++) {
msi_data = base_vector;
- if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
msi_data += i;
- irq[i] = ath11k_pci_get_msi_irq(ab->dev,
- msi_data);
+ irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
}
ab_pci->mhi_ctrl->irq = irq;
@@ -292,15 +294,48 @@ static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
{
}
+static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
+{
+ switch (reason) {
+ case MHI_CB_IDLE:
+ return "MHI_CB_IDLE";
+ case MHI_CB_PENDING_DATA:
+ return "MHI_CB_PENDING_DATA";
+ case MHI_CB_LPM_ENTER:
+ return "MHI_CB_LPM_ENTER";
+ case MHI_CB_LPM_EXIT:
+ return "MHI_CB_LPM_EXIT";
+ case MHI_CB_EE_RDDM:
+ return "MHI_CB_EE_RDDM";
+ case MHI_CB_EE_MISSION_MODE:
+ return "MHI_CB_EE_MISSION_MODE";
+ case MHI_CB_SYS_ERROR:
+ return "MHI_CB_SYS_ERROR";
+ case MHI_CB_FATAL_ERROR:
+ return "MHI_CB_FATAL_ERROR";
+ case MHI_CB_BW_REQ:
+ return "MHI_CB_BW_REQ";
+ default:
+ return "UNKNOWN";
+ }
+};
+
static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "mhi notify status reason %s\n",
+ ath11k_mhi_op_callback_to_str(cb));
+
switch (cb) {
case MHI_CB_SYS_ERROR:
ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
+ case MHI_CB_EE_RDDM:
+ if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ break;
default:
break;
}
@@ -371,7 +406,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
return ret;
}
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
@@ -428,216 +463,62 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
mhi_free_controller(mhi_ctrl);
}
-static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state)
-{
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- return "INIT";
- case ATH11K_MHI_DEINIT:
- return "DEINIT";
- case ATH11K_MHI_POWER_ON:
- return "POWER_ON";
- case ATH11K_MHI_POWER_OFF:
- return "POWER_OFF";
- case ATH11K_MHI_FORCE_POWER_OFF:
- return "FORCE_POWER_OFF";
- case ATH11K_MHI_SUSPEND:
- return "SUSPEND";
- case ATH11K_MHI_RESUME:
- return "RESUME";
- case ATH11K_MHI_TRIGGER_RDDM:
- return "TRIGGER_RDDM";
- case ATH11K_MHI_RDDM_DONE:
- return "RDDM_DONE";
- default:
- return "UNKNOWN";
- }
-};
-
-static void ath11k_mhi_set_state_bit(struct ath11k_pci *ab_pci,
- enum ath11k_mhi_state mhi_state)
+int ath11k_mhi_start(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
+ int ret;
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- set_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_DEINIT:
- clear_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_POWER_ON:
- set_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_POWER_OFF:
- case ATH11K_MHI_FORCE_POWER_OFF:
- clear_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
- clear_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
- clear_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_SUSPEND:
- set_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_RESUME:
- clear_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_TRIGGER_RDDM:
- set_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_RDDM_DONE:
- set_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
- break;
- default:
- ath11k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
- }
-}
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
-static int ath11k_mhi_check_state_bit(struct ath11k_pci *ab_pci,
- enum ath11k_mhi_state mhi_state)
-{
- struct ath11k_base *ab = ab_pci->ab;
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to prepare mhi: %d", ret);
+ return ret;
+ }
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- if (!test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_DEINIT:
- case ATH11K_MHI_POWER_ON:
- if (test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state) &&
- !test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_FORCE_POWER_OFF:
- if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_POWER_OFF:
- case ATH11K_MHI_SUSPEND:
- if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
- !test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_RESUME:
- if (test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_TRIGGER_RDDM:
- if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
- !test_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_RDDM_DONE:
- return 0;
- default:
- ath11k_err(ab, "unhandled mhi state: %s(%d)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state);
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to power up mhi: %d", ret);
+ return ret;
}
- ath11k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state,
- ab_pci->mhi_state);
+ return 0;
+}
- return -EINVAL;
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+{
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
-static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
- enum ath11k_mhi_state mhi_state)
+int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
int ret;
- ret = ath11k_mhi_check_state_bit(ab_pci, mhi_state);
- if (ret)
- goto out;
-
- ath11k_dbg(ab, ATH11K_DBG_PCI, "setting mhi state: %s(%d)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state);
-
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_DEINIT:
- mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
- ret = 0;
- break;
- case ATH11K_MHI_POWER_ON:
- ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_POWER_OFF:
- mhi_power_down(ab_pci->mhi_ctrl, true);
- ret = 0;
- break;
- case ATH11K_MHI_FORCE_POWER_OFF:
- mhi_power_down(ab_pci->mhi_ctrl, false);
- ret = 0;
- break;
- case ATH11K_MHI_SUSPEND:
- ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_RESUME:
- /* Do force MHI resume as some devices like QCA6390, WCN6855
- * are not in M3 state but they are functional. So just ignore
- * the MHI state while resuming.
- */
- ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_TRIGGER_RDDM:
- ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_RDDM_DONE:
- break;
- default:
- ath11k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
- ret = -EINVAL;
+ ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to suspend mhi: %d", ret);
+ return ret;
}
- if (ret)
- goto out;
-
- ath11k_mhi_set_state_bit(ab_pci, mhi_state);
-
return 0;
-
-out:
- ath11k_err(ab, "failed to set mhi state: %s(%d)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state);
- return ret;
}
-int ath11k_mhi_start(struct ath11k_pci *ab_pci)
+int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
{
+ struct ath11k_base *ab = ab_pci->ab;
int ret;
- ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
-
- ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_INIT);
- if (ret)
- goto out;
-
- ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_ON);
- if (ret)
- goto out;
+ /* Do force MHI resume as some devices like QCA6390, WCN6855
+ * are not in M3 state but they are functional. So just ignore
+ * the MHI state while resuming.
+ */
+ ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to resume mhi: %d", ret);
+ return ret;
+ }
return 0;
-
-out:
- return ret;
-}
-
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
-{
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_OFF);
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT);
-}
-
-void ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
-{
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_SUSPEND);
-}
-
-void ath11k_mhi_resume(struct ath11k_pci *ab_pci)
-{
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_RESUME);
}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index 488dada5d31c..8d9f852da695 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -16,19 +16,6 @@
#define MHICTRL 0x38
#define MHICTRL_RESET_MASK 0x2
-enum ath11k_mhi_state {
- ATH11K_MHI_INIT,
- ATH11K_MHI_DEINIT,
- ATH11K_MHI_POWER_ON,
- ATH11K_MHI_POWER_OFF,
- ATH11K_MHI_FORCE_POWER_OFF,
- ATH11K_MHI_SUSPEND,
- ATH11K_MHI_RESUME,
- ATH11K_MHI_TRIGGER_RDDM,
- ATH11K_MHI_RDDM,
- ATH11K_MHI_RDDM_DONE,
-};
-
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
@@ -36,7 +23,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
void ath11k_mhi_clear_vector(struct ath11k_base *ab);
-void ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
-void ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
+int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 903758751c99..dedf1b88ddf6 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -13,29 +14,15 @@
#include "hif.h"
#include "mhi.h"
#include "debug.h"
+#include "pcic.h"
#define ATH11K_PCI_BAR_NUM 0
#define ATH11K_PCI_DMA_MASK 32
-#define ATH11K_PCI_IRQ_CE0_OFFSET 3
-#define ATH11K_PCI_IRQ_DP_OFFSET 14
-
-#define WINDOW_ENABLE_BIT 0x40000000
-#define WINDOW_REG_ADDRESS 0x310c
-#define WINDOW_VALUE_MASK GENMASK(24, 19)
-#define WINDOW_START 0x80000
-#define WINDOW_RANGE_MASK GENMASK(18, 0)
-
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
-/* BAR0 + 4k is always accessible, and no
- * need to force wakeup.
- * 4K - 32 = 0xFE0
- */
-#define ACCESS_ALWAYS_OFF 0xFE0
-
#define QCA6390_DEVICE_ID 0x1101
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
@@ -49,233 +36,119 @@ static const struct pci_device_id ath11k_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
-static const struct ath11k_bus_params ath11k_pci_bus_params = {
- .mhi_support = true,
- .m3_fw_support = true,
- .fixed_bdf_addr = false,
- .fixed_mem_region = false,
-};
+static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-static const struct ath11k_msi_config ath11k_msi_config[] = {
- {
- .total_vectors = 32,
- .total_users = 4,
- .users = (struct ath11k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 10, .base_vector = 3 },
- { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
- { .name = "DP", .num_vectors = 18, .base_vector = 14 },
- },
- },
- {
- .total_vectors = 16,
- .total_users = 3,
- .users = (struct ath11k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 5, .base_vector = 3 },
- { .name = "DP", .num_vectors = 8, .base_vector = 8 },
- },
- },
-};
+ return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+}
-static const struct ath11k_msi_config msi_config_one_msi = {
- .total_vectors = 1,
- .total_users = 4,
- .users = (struct ath11k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 1, .base_vector = 0 },
- { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
- { .name = "DP", .num_vectors = 1, .base_vector = 0 },
- },
-};
+static void ath11k_pci_bus_release(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
- "bhi",
- "mhi-er0",
- "mhi-er1",
- "ce0",
- "ce1",
- "ce2",
- "ce3",
- "ce4",
- "ce5",
- "ce6",
- "ce7",
- "ce8",
- "ce9",
- "ce10",
- "ce11",
- "host2wbm-desc-feed",
- "host2reo-re-injection",
- "host2reo-command",
- "host2rxdma-monitor-ring3",
- "host2rxdma-monitor-ring2",
- "host2rxdma-monitor-ring1",
- "reo2ost-exception",
- "wbm2host-rx-release",
- "reo2host-status",
- "reo2host-destination-ring4",
- "reo2host-destination-ring3",
- "reo2host-destination-ring2",
- "reo2host-destination-ring1",
- "rxdma2host-monitor-destination-mac3",
- "rxdma2host-monitor-destination-mac2",
- "rxdma2host-monitor-destination-mac1",
- "ppdu-end-interrupts-mac3",
- "ppdu-end-interrupts-mac2",
- "ppdu-end-interrupts-mac1",
- "rxdma2host-monitor-status-ring-mac3",
- "rxdma2host-monitor-status-ring-mac2",
- "rxdma2host-monitor-status-ring-mac1",
- "host2rxdma-host-buf-ring-mac3",
- "host2rxdma-host-buf-ring-mac2",
- "host2rxdma-host-buf-ring-mac1",
- "rxdma2host-destination-ring-mac3",
- "rxdma2host-destination-ring-mac2",
- "rxdma2host-destination-ring-mac1",
- "host2tcl-input-ring4",
- "host2tcl-input-ring3",
- "host2tcl-input-ring2",
- "host2tcl-input-ring1",
- "wbm2host-tx-completions-ring3",
- "wbm2host-tx-completions-ring2",
- "wbm2host-tx-completions-ring1",
- "tcl2host-status-ring",
-};
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
struct ath11k_base *ab = ab_pci->ab;
- u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);
+ u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);
lockdep_assert_held(&ab_pci->window_lock);
if (window != ab_pci->register_window) {
- iowrite32(WINDOW_ENABLE_BIT | window,
- ab->mem + WINDOW_REG_ADDRESS);
- ioread32(ab->mem + WINDOW_REG_ADDRESS);
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
ab_pci->register_window = window;
}
}
-static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
+static void
+ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
- u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
- u32 ce_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
- u32 window;
-
- window = (umac_window << 12) | (ce_window << 6);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 window_start = ATH11K_PCI_WINDOW_START;
- iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
}
-static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab,
- u32 offset)
+static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
{
- u32 window_start;
-
- /* If offset lies within DP register range, use 3rd window */
- if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
- window_start = 3 * WINDOW_START;
- /* If offset lies within CE register range, use 2nd window */
- else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
- window_start = 2 * WINDOW_START;
- else
- window_start = WINDOW_START;
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 window_start = ATH11K_PCI_WINDOW_START;
+ u32 val;
+
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
- return window_start;
+ return val;
}
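
The new windowed accessors split a register offset into a window-select field and an offset inside the 512 KiB window; going by the constants removed from this file (WINDOW_VALUE_MASK = GENMASK(24, 19), WINDOW_RANGE_MASK = GENMASK(18, 0), WINDOW_START = 0x80000), the renamed ATH11K_PCI_* macros presumably carry the same values. A worked example under that assumption:

	#include <stdint.h>

	/* offset 0x01234568:
	 *   window    = (0x01234568 >> 19) & 0x3f = 0x24
	 *   in-window =  0x01234568 & 0x7ffff     = 0x34568
	 *
	 * so after writing window 0x24 (with the enable bit) to the window
	 * register, the target register is reached at BAR0 + 0x80000 + 0x34568.
	 */
	static uint32_t example_window_offset(uint32_t offset, uint32_t *window)
	{
		*window = (offset >> 19) & 0x3f;	/* GENMASK(24, 19) */
		return 0x80000 + (offset & 0x7ffff);	/* WINDOW_START + GENMASK(18, 0) */
	}
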
-void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 window_start;
-
- /* for offset beyond BAR + 4K - 32, may
- * need to wakeup MHI to access.
- */
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
-
- if (offset < WINDOW_START) {
- iowrite32(value, ab->mem + offset);
- } else {
- if (ab->bus_params.static_window_map)
- window_start = ath11k_pci_get_window_start(ab, offset);
- else
- window_start = WINDOW_START;
-
- if (window_start == WINDOW_START) {
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
- } else {
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- }
- }
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+ return pci_irq_vector(pci_dev, vector);
}
-u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 val, window_start;
+static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
+ .wakeup = ath11k_pci_bus_wake_up,
+ .release = ath11k_pci_bus_release,
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
- /* for offset beyond BAR + 4K - 32, may
- * need to wakeup MHI to access.
- */
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
- if (offset < WINDOW_START) {
- val = ioread32(ab->mem + offset);
- } else {
- if (ab->bus_params.static_window_map)
- window_start = ath11k_pci_get_window_start(ab, offset);
- else
- window_start = WINDOW_START;
-
- if (window_start == WINDOW_START) {
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
- } else {
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- }
- }
+static const struct ath11k_msi_config msi_config_one_msi = {
+ .total_vectors = 1,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 1, .base_vector = 0 },
+ },
+};
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
+{
+ u32 umac_window;
+ u32 ce_window;
+ u32 window;
- return val;
+ umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
+ ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
+ window = (umac_window << 12) | (ce_window << 6);
+
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
}
static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
u32 val, delay;
- val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
val |= PCIE_SOC_GLOBAL_RESET_V;
- ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
/* TODO: exact time to sleep is uncertain */
delay = 10;
@@ -284,11 +157,11 @@ static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
/* Need to toggle V bit back otherwise stuck in reset status */
val &= ~PCIE_SOC_GLOBAL_RESET_V;
- ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
mdelay(delay);
- val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
if (val == 0xffffffff)
ath11k_warn(ab, "link down error during global reset\n");
}
@@ -298,10 +171,10 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
u32 val;
/* read cookie */
- val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);
- val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* TODO: exact time to sleep is uncertain */
@@ -310,16 +183,16 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
* continuing warm path and entering dead loop.
*/
- ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
mdelay(10);
- val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* A read clear register. clear the register to prevent
* Q6 from entering wrong code path.
*/
- val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
}
@@ -329,14 +202,14 @@ static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
u32 v;
int i;
- v = ath11k_pci_read32(ab, offset);
+ v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value)
return 0;
for (i = 0; i < 10; i++) {
- ath11k_pci_write32(ab, offset, (v & ~mask) | value);
+ ath11k_pcic_write32(ab, offset, (v & ~mask) | value);
- v = ath11k_pci_read32(ab, offset);
+ v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value)
return 0;
@@ -397,23 +270,23 @@ static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
u32 val;
int i;
- val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
/* PCIE link seems very unstable after the Hot Reset*/
for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
if (val == 0xffffffff)
mdelay(5);
- ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
- val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
}
ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val);
- val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
val |= GCC_GCC_PCIE_HOT_RST_VAL;
- ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
- val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
@@ -427,21 +300,21 @@ static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
* So when download SBL again, SBL will open Interrupt and
* receive it, and crash immediately.
*/
- ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
+ ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
{
u32 val;
- val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
+ val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
- ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
+ ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}
static void ath11k_pci_force_wake(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
mdelay(5);
}
@@ -463,463 +336,6 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
ath11k_mhi_set_mhictrl_reset(ab);
}
-int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
-
- return pci_irq_vector(pci_dev, vector);
-}
-
-static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
- u32 *msi_addr_hi)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- struct pci_dev *pci_dev = to_pci_dev(ab->dev);
-
- pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
- msi_addr_lo);
-
- if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
- pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
- msi_addr_hi);
- } else {
- *msi_addr_hi = 0;
- }
-}
-
-int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
- int *num_vectors, u32 *user_base_data,
- u32 *base_vector)
-{
- struct ath11k_base *ab = ab_pci->ab;
- const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
- int idx;
-
- for (idx = 0; idx < msi_config->total_users; idx++) {
- if (strcmp(user_name, msi_config->users[idx].name) == 0) {
- *num_vectors = msi_config->users[idx].num_vectors;
- *base_vector = msi_config->users[idx].base_vector;
- *user_base_data = *base_vector + ab_pci->msi_ep_base_data;
-
- ath11k_dbg(ab, ATH11K_DBG_PCI,
- "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
- user_name, *num_vectors, *user_base_data,
- *base_vector);
-
- return 0;
- }
- }
-
- ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
-
- return -EINVAL;
-}
-
-static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
- u32 *msi_idx)
-{
- u32 i, msi_data_idx;
-
- for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- if (ce_id == i)
- break;
-
- msi_data_idx++;
- }
- *msi_idx = msi_data_idx;
-}
-
-static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
- int *num_vectors, u32 *user_base_data,
- u32 *base_vector)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-
- return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
- num_vectors, user_base_data,
- base_vector);
-}
-
-static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
-{
- int i, j;
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
- for (j = 0; j < irq_grp->num_irq; j++)
- free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
-
- netif_napi_del(&irq_grp->napi);
- }
-}
-
-static void ath11k_pci_free_irq(struct ath11k_base *ab)
-{
- int i, irq_idx;
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
- free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
- }
-
- ath11k_pci_free_ext_irq(ab);
-}
-
-static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 irq_idx;
-
- /* In case of one MSI vector, we handle irq enable/disable in a
- * uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
- enable_irq(ab->irq_num[irq_idx]);
-}
-
-static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 irq_idx;
-
- /* In case of one MSI vector, we handle irq enable/disable in a
- * uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
- disable_irq_nosync(ab->irq_num[irq_idx]);
-}
-
-static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
-{
- int i;
-
- clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
- ath11k_pci_ce_irq_disable(ab, i);
- }
-}
-
-static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
-{
- int i;
- int irq_idx;
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
- synchronize_irq(ab->irq_num[irq_idx]);
- }
-}
-
-static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
-{
- struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
- int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
-
- ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
-
- enable_irq(ce_pipe->ab->irq_num[irq_idx]);
-}
-
-static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
-{
- struct ath11k_ce_pipe *ce_pipe = arg;
- struct ath11k_base *ab = ce_pipe->ab;
- int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
-
- if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
- return IRQ_HANDLED;
-
- /* last interrupt received for this CE */
- ce_pipe->timestamp = jiffies;
-
- disable_irq_nosync(ab->irq_num[irq_idx]);
-
- tasklet_schedule(&ce_pipe->intr_tq);
-
- return IRQ_HANDLED;
-}
-
-static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
- int i;
-
- /* In case of one MSI vector, we handle irq enable/disable
- * in a uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- for (i = 0; i < irq_grp->num_irq; i++)
- disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
-}
-
-static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
-{
- int i;
-
- clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
-
- ath11k_pci_ext_grp_disable(irq_grp);
-
- if (irq_grp->napi_enabled) {
- napi_synchronize(&irq_grp->napi);
- napi_disable(&irq_grp->napi);
- irq_grp->napi_enabled = false;
- }
- }
-}
-
-static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
- int i;
-
- /* In case of one MSI vector, we handle irq enable/disable in a
- * uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- for (i = 0; i < irq_grp->num_irq; i++)
- enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
-}
-
-static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
-{
- int i;
-
- set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
- if (!irq_grp->napi_enabled) {
- napi_enable(&irq_grp->napi);
- irq_grp->napi_enabled = true;
- }
- ath11k_pci_ext_grp_enable(irq_grp);
- }
-}
-
-static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
-{
- int i, j, irq_idx;
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
- for (j = 0; j < irq_grp->num_irq; j++) {
- irq_idx = irq_grp->irqs[j];
- synchronize_irq(ab->irq_num[irq_idx]);
- }
- }
-}
-
-static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
-{
- __ath11k_pci_ext_irq_disable(ab);
- ath11k_pci_sync_ext_irqs(ab);
-}
-
-static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
-{
- struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
- struct ath11k_ext_irq_grp,
- napi);
- struct ath11k_base *ab = irq_grp->ab;
- int work_done;
- int i;
-
- work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- for (i = 0; i < irq_grp->num_irq; i++)
- enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
- }
-
- if (work_done > budget)
- work_done = budget;
-
- return work_done;
-}
-
-static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
-{
- struct ath11k_ext_irq_grp *irq_grp = arg;
- struct ath11k_base *ab = irq_grp->ab;
- int i;
-
- if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
- return IRQ_HANDLED;
-
- ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
-
- /* last interrupt received for this group */
- irq_grp->timestamp = jiffies;
-
- for (i = 0; i < irq_grp->num_irq; i++)
- disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
-
- napi_schedule(&irq_grp->napi);
-
- return IRQ_HANDLED;
-}
-
-static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- int i, j, ret, num_vectors = 0;
- u32 user_base_data = 0, base_vector = 0;
-
- ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
- &num_vectors,
- &user_base_data,
- &base_vector);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- u32 num_irq = 0;
-
- irq_grp->ab = ab;
- irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
- ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
-
- if (ab->hw_params.ring_mask->tx[i] ||
- ab->hw_params.ring_mask->rx[i] ||
- ab->hw_params.ring_mask->rx_err[i] ||
- ab->hw_params.ring_mask->rx_wbm_rel[i] ||
- ab->hw_params.ring_mask->reo_status[i] ||
- ab->hw_params.ring_mask->rxdma2host[i] ||
- ab->hw_params.ring_mask->host2rxdma[i] ||
- ab->hw_params.ring_mask->rx_mon_status[i]) {
- num_irq = 1;
- }
-
- irq_grp->num_irq = num_irq;
- irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
-
- for (j = 0; j < irq_grp->num_irq; j++) {
- int irq_idx = irq_grp->irqs[j];
- int vector = (i % num_vectors) + base_vector;
- int irq = ath11k_pci_get_msi_irq(ab->dev, vector);
-
- ab->irq_num[irq_idx] = irq;
-
- ath11k_dbg(ab, ATH11K_DBG_PCI,
- "irq:%d group:%d\n", irq, i);
-
- irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
- ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
- ab_pci->irq_flags,
- "DP_EXT_IRQ", irq_grp);
- if (ret) {
- ath11k_err(ab, "failed request irq %d: %d\n",
- vector, ret);
- return ret;
- }
- }
- ath11k_pci_ext_grp_disable(irq_grp);
- }
-
- return 0;
-}
-
-static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
- const struct cpumask *m)
-{
- if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return 0;
-
- return irq_set_affinity_hint(ab_pci->pdev->irq, m);
-}
-
-static int ath11k_pci_config_irq(struct ath11k_base *ab)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- struct ath11k_ce_pipe *ce_pipe;
- u32 msi_data_start;
- u32 msi_data_count, msi_data_idx;
- u32 msi_irq_start;
- unsigned int msi_data;
- int irq, i, ret, irq_idx;
-
- ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
- "CE", &msi_data_count,
- &msi_data_start, &msi_irq_start);
- if (ret)
- return ret;
-
- ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
- if (ret) {
- ath11k_err(ab, "failed to set irq affinity %d\n", ret);
- return ret;
- }
-
- /* Configure CE irqs */
- for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
- irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
- ce_pipe = &ab->ce.ce_pipe[i];
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
-
- tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
-
- ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
- ab_pci->irq_flags, irq_name[irq_idx],
- ce_pipe);
- if (ret) {
- ath11k_err(ab, "failed to request irq %d: %d\n",
- irq_idx, ret);
- goto err_irq_affinity_cleanup;
- }
-
- ab->irq_num[irq_idx] = irq;
- msi_data_idx++;
-
- ath11k_pci_ce_irq_disable(ab, i);
- }
-
- ret = ath11k_pci_ext_irq_config(ab);
- if (ret)
- goto err_irq_affinity_cleanup;
-
- return 0;
-
-err_irq_affinity_cleanup:
- ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- return ret;
-}
-
static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
@@ -935,19 +351,6 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
&cfg->shadow_reg_v2_len);
}
-static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
-{
- int i;
-
- set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
- ath11k_pci_ce_irq_enable(ab, i);
- }
-}
-
static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
{
struct pci_dev *dev = ab_pci->pdev;
@@ -976,18 +379,18 @@ static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
- const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ struct pci_dev *pci_dev = ab_pci->pdev;
struct msi_desc *msi_desc;
int num_vectors;
int ret;
- num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ num_vectors = pci_alloc_irq_vectors(pci_dev,
msi_config->total_vectors,
msi_config->total_vectors,
PCI_IRQ_MSI);
if (num_vectors == msi_config->total_vectors) {
- set_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
- ab_pci->irq_flags = IRQF_SHARED;
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
} else {
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
1,
@@ -997,9 +400,8 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
ret = -EINVAL;
goto reset_msi_config;
}
- clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
- ab_pci->msi_config = &msi_config_one_msi;
- ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+ clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+ ab->pci.msi.config = &msi_config_one_msi;
ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n");
}
ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
@@ -1013,11 +415,19 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
goto free_msi_vector;
}
- ab_pci->msi_ep_base_data = msi_desc->msg.data;
- if (msi_desc->pci.msi_attrib.is_64)
- set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
+ ab->pci.msi.ep_base_data = msi_desc->msg.data;
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ &ab->pci.msi.addr_lo);
+
+ if (msi_desc->pci.msi_attrib.is_64) {
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ &ab->pci.msi.addr_hi);
+ } else {
+ ab->pci.msi.addr_hi = 0;
+ }
- ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);
return 0;
@@ -1044,10 +454,10 @@ static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
return -EINVAL;
}
- ab_pci->msi_ep_base_data = msi_desc->msg.data;
+ ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;
ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
- ab_pci->msi_ep_base_data);
+ ab_pci->ab->pci.msi.ep_base_data);
return 0;
}
@@ -1160,7 +570,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
int ret;
ab_pci->register_window = 0;
- clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
/* Disable ASPM during firmware download due to problems switching
@@ -1176,7 +586,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return ret;
}
- if (ab->bus_params.static_window_map)
+ if (ab->hw_params.static_window_map)
ath11k_pci_select_static_window(ab_pci);
return 0;
@@ -1194,7 +604,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci);
ath11k_mhi_stop(ab_pci);
- clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -1202,144 +612,67 @@ static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
- ath11k_mhi_suspend(ar_pci);
-
- return 0;
+ return ath11k_mhi_suspend(ar_pci);
}
static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
- ath11k_mhi_resume(ar_pci);
-
- return 0;
-}
-
-static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
-{
- int i;
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
-
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- tasklet_kill(&ce_pipe->intr_tq);
- }
+ return ath11k_mhi_resume(ar_pci);
}
-static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab)
+static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
- ath11k_pci_ce_irqs_disable(ab);
- ath11k_pci_sync_ce_irqs(ab);
- ath11k_pci_kill_tasklets(ab);
+ ath11k_pcic_ce_irqs_enable(ab);
}
-static void ath11k_pci_stop(struct ath11k_base *ab)
+static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
- ath11k_pci_ce_irq_disable_sync(ab);
- ath11k_ce_cleanup_pipes(ab);
+ ath11k_pcic_ce_irq_disable_sync(ab);
}
static int ath11k_pci_start(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
-
/* TODO: for now don't restore ASPM in case of single MSI
* vector as MHI register reading in M2 causes system hang.
*/
- if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
ath11k_pci_aspm_restore(ab_pci);
else
ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
- ath11k_pci_ce_irqs_enable(ab);
- ath11k_ce_rx_post_buf(ab);
-
- return 0;
-}
-
-static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
-{
- ath11k_pci_ce_irqs_enable(ab);
-}
-
-static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
-{
- ath11k_pci_ce_irq_disable_sync(ab);
-}
-
-static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe)
-{
- const struct service_to_pipe *entry;
- bool ul_set = false, dl_set = false;
- int i;
-
- for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
- entry = &ab->hw_params.svc_to_ce_map[i];
-
- if (__le32_to_cpu(entry->service_id) != service_id)
- continue;
-
- switch (__le32_to_cpu(entry->pipedir)) {
- case PIPEDIR_NONE:
- break;
- case PIPEDIR_IN:
- WARN_ON(dl_set);
- *dl_pipe = __le32_to_cpu(entry->pipenum);
- dl_set = true;
- break;
- case PIPEDIR_OUT:
- WARN_ON(ul_set);
- *ul_pipe = __le32_to_cpu(entry->pipenum);
- ul_set = true;
- break;
- case PIPEDIR_INOUT:
- WARN_ON(dl_set);
- WARN_ON(ul_set);
- *dl_pipe = __le32_to_cpu(entry->pipenum);
- *ul_pipe = __le32_to_cpu(entry->pipenum);
- dl_set = true;
- ul_set = true;
- break;
- }
- }
-
- if (WARN_ON(!ul_set || !dl_set))
- return -ENOENT;
+ ath11k_pcic_start(ab);
return 0;
}
static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.start = ath11k_pci_start,
- .stop = ath11k_pci_stop,
- .read32 = ath11k_pci_read32,
- .write32 = ath11k_pci_write32,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
.power_down = ath11k_pci_power_down,
.power_up = ath11k_pci_power_up,
.suspend = ath11k_pci_hif_suspend,
.resume = ath11k_pci_hif_resume,
- .irq_enable = ath11k_pci_ext_irq_enable,
- .irq_disable = ath11k_pci_ext_irq_disable,
- .get_msi_address = ath11k_pci_get_msi_address,
- .get_user_msi_vector = ath11k_get_user_msi_assignment,
- .map_service_to_pipe = ath11k_pci_map_service_to_pipe,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
- .get_ce_msi_idx = ath11k_pci_get_ce_msi_idx,
+ .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
};
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
{
u32 soc_hw_version;
- soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
soc_hw_version);
*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
@@ -1349,6 +682,15 @@ static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *
*major, *minor);
}
+static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
+ const struct cpumask *m)
+{
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
+ return 0;
+
+ return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+}
+
static int ath11k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
@@ -1357,8 +699,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
u32 soc_hw_version_major, soc_hw_version_minor, addr;
int ret;
- ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
- &ath11k_pci_bus_params);
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
+
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
@@ -1411,11 +753,11 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
- ab_pci->msi_config = &ath11k_msi_config[0];
+
+ ab->pci.ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- ab_pci->msi_config = &ath11k_msi_config[1];
- ab->bus_params.static_window_map = true;
+ ab->pci.ops = &ath11k_pci_ops_qcn9074;
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
@@ -1444,7 +786,8 @@ unsupported_wcn6855_soc:
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
- ab_pci->msi_config = &ath11k_msi_config[0];
+
+ ab->pci.ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -1453,6 +796,12 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ret = ath11k_pci_alloc_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to enable msi: %d\n", ret);
@@ -1481,12 +830,18 @@ unsupported_wcn6855_soc:
ath11k_pci_init_qmi_ce_config(ab);
- ret = ath11k_pci_config_irq(ab);
+ ret = ath11k_pcic_config_irq(ab);
if (ret) {
ath11k_err(ab, "failed to config irq: %d\n", ret);
goto err_ce_free;
}
+ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+ if (ret) {
+ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+ goto err_free_irq;
+ }
+
/* kernel may allocate a dummy vector before request_irq and
* then allocate a real vector when request_irq is called.
* So get msi_data here again to avoid spurious interrupt
@@ -1495,18 +850,21 @@ unsupported_wcn6855_soc:
ret = ath11k_pci_config_msi_data(ab_pci);
if (ret) {
ath11k_err(ab, "failed to config msi_data: %d\n", ret);
- goto err_free_irq;
+ goto err_irq_affinity_cleanup;
}
ret = ath11k_core_init(ab);
if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret);
- goto err_free_irq;
+ goto err_irq_affinity_cleanup;
}
return 0;
+err_irq_affinity_cleanup:
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
err_free_irq:
- ath11k_pci_free_irq(ab);
+ ath11k_pcic_free_irq(ab);
err_ce_free:
ath11k_ce_free_pipes(ab);
@@ -1550,7 +908,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
qmi_fail:
ath11k_mhi_unregister(ab_pci);
- ath11k_pci_free_irq(ab);
+ ath11k_pcic_free_irq(ab);
ath11k_pci_free_msi(ab_pci);
ath11k_pci_free_region(ab_pci);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 61d67b20a0eb..e9a01f344ec6 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@@ -52,23 +53,8 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
-struct ath11k_msi_user {
- char *name;
- int num_vectors;
- u32 base_vector;
-};
-
-struct ath11k_msi_config {
- int total_vectors;
- int total_users;
- struct ath11k_msi_user *users;
-};
-
enum ath11k_pci_flags {
- ATH11K_PCI_FLAG_INIT_DONE,
- ATH11K_PCI_FLAG_IS_MSI_64,
ATH11K_PCI_ASPM_RESTORE,
- ATH11K_PCI_FLAG_MULTI_MSI_VECTORS,
};
struct ath11k_pci {
@@ -76,10 +62,8 @@ struct ath11k_pci {
struct ath11k_base *ab;
u16 dev_id;
char amss_path[100];
- u32 msi_ep_base_data;
struct mhi_controller *mhi_ctrl;
const struct ath11k_msi_config *msi_config;
- unsigned long mhi_state;
u32 register_window;
/* protects register_window above */
@@ -88,8 +72,6 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
-
- unsigned long irq_flags;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
@@ -97,11 +79,5 @@ static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
return (struct ath11k_pci *)ab->drv_priv;
}
-int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name,
- int *num_vectors, u32 *user_base_data,
- u32 *base_vector);
-int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector);
-void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value);
-u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset);
-
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
new file mode 100644
index 000000000000..cf12b98c480d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "pcic.h"
+#include "debug.h"
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+static const struct ath11k_msi_config ath11k_msi_config[] = {
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ },
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
+ .hw_rev = ATH11K_HW_QCN9074_HW10,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW21,
+ },
+ {
+ .total_vectors = 28,
+ .total_users = 2,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "CE", .num_vectors = 10, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 10 },
+ },
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ },
+};
+
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
+{
+ const struct ath11k_msi_config *msi_config;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
+ msi_config = &ath11k_msi_config[i];
+
+ if (msi_config->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath11k_msi_config)) {
+ ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->pci.msi.config = msi_config;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
+
+static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab,
+ u32 offset)
+{
+ u32 window_start = 0;
+
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = ab->hw_params.dp_window_idx * ATH11K_PCI_WINDOW_START;
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = ab->hw_params.ce_window_idx * ATH11K_PCI_WINDOW_START;
+
+ return window_start;
+}
+
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ u32 window_start;
+ int ret = 0;
+
+ /* For an offset beyond BAR + 4K - 32, we may
+ * need to wake up the device for the access.
+ */
+ if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ if (offset < ATH11K_PCI_WINDOW_START) {
+ iowrite32(value, ab->mem + offset);
+ } else if (ab->hw_params.static_window_map) {
+ window_start = ath11k_pcic_get_window_start(ab, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ } else if (ab->pci.ops->window_write32) {
+ ab->pci.ops->window_write32(ab, offset, value);
+ }
+
+ if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
+ !ret)
+ ab->pci.ops->release(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_write32);
+
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ u32 val = 0;
+ u32 window_start;
+ int ret = 0;
+
+ /* For an offset beyond BAR + 4K - 32, we may
+ * need to wake up the device for the access.
+ */
+ if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ if (offset < ATH11K_PCI_WINDOW_START) {
+ val = ioread32(ab->mem + offset);
+ } else if (ab->hw_params.static_window_map) {
+ window_start = ath11k_pcic_get_window_start(ab, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ } else if (ab->pci.ops->window_read32) {
+ val = ab->pci.ops->window_read32(ab, offset);
+ }
+
+ if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
+ !ret)
+ ab->pci.ops->release(ab);
+
+ return val;
+}
+EXPORT_SYMBOL(ath11k_pcic_read32);
+
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ *msi_addr_lo = ab->pci.msi.addr_lo;
+ *msi_addr_hi = ab->pci.msi.addr_hi;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ int idx;
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *base_vector = msi_config->users[idx].base_vector;
+ *user_base_data = *base_vector + ab->pci.msi.ep_base_data;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
+
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
+{
+ u32 i, msi_data_idx;
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ if (ce_id == i)
+ break;
+
+ msi_data_idx++;
+ }
+ *msi_idx = msi_data_idx;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
+
+static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+void ath11k_pcic_free_irq(struct ath11k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_pcic_free_ext_irq(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_free_irq);
+
+static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ enable_irq(ce_pipe->ab->irq_num[irq_idx]);
+}
+
+static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+ struct ath11k_base *ab = ce_pipe->ab;
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable
+ * in a uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+
+ ath11k_pcic_ext_grp_disable(irq_grp);
+
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath11k_pcic_ext_grp_enable(irq_grp);
+ }
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
+
+static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_pcic_ext_irq_disable(ab);
+ ath11k_pcic_sync_ext_irqs(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
+
+static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+ int i;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int
+ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
+{
+ if (!ab->pci.ops->get_msi_irq) {
+ WARN_ONCE(1, "get_msi_irq pci op not defined");
+ return -EOPNOTSUPP;
+ }
+
+ return ab->pci.ops->get_msi_irq(ab, vector);
+}
+
+static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_pcic_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+
+ if (ab->hw_params.ring_mask->tx[i] ||
+ ab->hw_params.ring_mask->rx[i] ||
+ ab->hw_params.ring_mask->rx_err[i] ||
+ ab->hw_params.ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params.ring_mask->reo_status[i] ||
+ ab->hw_params.ring_mask->rxdma2host[i] ||
+ ab->hw_params.ring_mask->host2rxdma[i] ||
+ ab->hw_params.ring_mask->rx_mon_status[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath11k_pcic_get_msi_irq(ab, vector);
+
+ if (irq < 0)
+ return irq;
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "irq:%d group:%d\n", irq, i);
+
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
+ irq_flags, "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ return ret;
+ }
+ }
+ ath11k_pcic_ext_grp_disable(irq_grp);
+ }
+
+ return 0;
+}
+
+int ath11k_pcic_config_irq(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ /* Configure CE irqs */
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+ irq = ath11k_pcic_get_msi_irq(ab, msi_data);
+ if (irq < 0)
+ return irq;
+
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
+
+ ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
+ irq_flags, irq_name[irq_idx], ce_pipe);
+ if (ret) {
+ ath11k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ msi_data_idx++;
+
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+
+ ret = ath11k_pcic_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_config_irq);
+
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
+
+static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irqs_disable(ab);
+ ath11k_pcic_sync_ce_irqs(ab);
+ ath11k_pcic_kill_tasklets(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
+
+void ath11k_pcic_stop(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irq_disable_sync(ab);
+ ath11k_ce_cleanup_pipes(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_stop);
+
+int ath11k_pcic_start(struct ath11k_base *ab)
+{
+ set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+
+ ath11k_pcic_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_start);
+
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
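ath11k_pcic_map_service_to_pipe() above resolves one uplink and one downlink pipe for a service by scanning the per-chip svc_to_ce_map table. A compact userspace sketch of that scan, using a made-up table and simplified types rather than the driver's __le32-based entries, might look like this:

/* Userspace sketch of the ul/dl pipe scan; the table below is made up
 * and the __le32 handling in the real driver is omitted.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum demo_pipedir { DEMO_PIPEDIR_NONE, DEMO_PIPEDIR_IN, DEMO_PIPEDIR_OUT, DEMO_PIPEDIR_INOUT };

struct demo_svc_map {
	uint16_t service_id;
	enum demo_pipedir pipedir;
	uint8_t pipenum;
};

static const struct demo_svc_map demo_map[] = {
	{ .service_id = 1, .pipedir = DEMO_PIPEDIR_OUT, .pipenum = 3 },
	{ .service_id = 1, .pipedir = DEMO_PIPEDIR_IN,  .pipenum = 2 },
};

/* Fill *ul_pipe/*dl_pipe from the table; fail unless both directions map. */
static int demo_map_service(uint16_t service_id, uint8_t *ul_pipe, uint8_t *dl_pipe)
{
	bool ul_set = false, dl_set = false;
	size_t i;

	for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++) {
		const struct demo_svc_map *e = &demo_map[i];

		if (e->service_id != service_id)
			continue;

		if (e->pipedir == DEMO_PIPEDIR_IN || e->pipedir == DEMO_PIPEDIR_INOUT) {
			*dl_pipe = e->pipenum;
			dl_set = true;
		}
		if (e->pipedir == DEMO_PIPEDIR_OUT || e->pipedir == DEMO_PIPEDIR_INOUT) {
			*ul_pipe = e->pipenum;
			ul_set = true;
		}
	}

	return (ul_set && dl_set) ? 0 : -1;
}

int main(void)
{
	uint8_t ul, dl;

	if (demo_map_service(1, &ul, &dl) == 0)
		printf("service 1: ul pipe %u, dl pipe %u\n", ul, dl);
	return 0;
}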
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
new file mode 100644
index 000000000000..c53d86289a8e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_PCI_CMN_H
+#define _ATH11K_PCI_CMN_H
+
+#include "core.h"
+
+#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+#define ATH11K_PCI_IRQ_DP_OFFSET 14
+
+#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
+#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
+#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
+#define ATH11K_PCI_WINDOW_START 0x80000
+#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0)
+
+/* BAR0 + 4k is always accessible, and no
+ * need to force wakeup.
+ * 4K - 32 = 0xFE0
+ */
+#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value);
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset);
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+void ath11k_pcic_free_irq(struct ath11k_base *ab);
+int ath11k_pcic_config_irq(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
+void ath11k_pcic_stop(struct ath11k_base *ab);
+int ath11k_pcic_start(struct ath11k_base *ab);
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
+#endif
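The header also exports ath11k_pcic_init_msi_config(), which now picks the MSI vector layout by matching hw_rev against a static table instead of hard-coding it per PCI device ID in pci.c. A minimal userspace sketch of that lookup, with hypothetical revision values and a shortened table, could be:

/* Illustrative only: simplified stand-ins for the hw_rev-keyed MSI
 * config lookup; the revision and vector counts here are made up.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct demo_msi_user {
	const char *name;
	int num_vectors;
	unsigned int base_vector;
};

struct demo_msi_config {
	int total_vectors;
	int total_users;
	const struct demo_msi_user *users;
	unsigned int hw_rev;
};

enum { DEMO_HW_REV_A = 1 };

static const struct demo_msi_user demo_users_a[] = {
	{ "MHI", 3, 0 }, { "CE", 10, 3 }, { "DP", 18, 13 },
};

static const struct demo_msi_config demo_configs[] = {
	{ .total_vectors = 31, .total_users = 3,
	  .users = demo_users_a, .hw_rev = DEMO_HW_REV_A },
};

/* Return the config whose hw_rev matches, or NULL for unsupported chips. */
static const struct demo_msi_config *demo_find_msi_config(unsigned int hw_rev)
{
	size_t i;

	for (i = 0; i < sizeof(demo_configs) / sizeof(demo_configs[0]); i++)
		if (demo_configs[i].hw_rev == hw_rev)
			return &demo_configs[i];
	return NULL;
}

int main(void)
{
	const struct demo_msi_config *cfg = demo_find_msi_config(DEMO_HW_REV_A);

	if (!cfg)
		return EINVAL;
	printf("%d MSI vectors across %d users\n",
	       cfg->total_vectors, cfg->total_users);
	return 0;
}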
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 332886bc6b33..9e22aaf34b88 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -1,23 +1,22 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
-struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
- const u8 *addr)
+static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
+ int peer_id)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
- if (peer->vdev_id != vdev_id)
- continue;
- if (!ether_addr_equal(peer->addr, addr))
+ if (peer->peer_id != peer_id)
continue;
return peer;
@@ -26,15 +25,15 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
return NULL;
}
-static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab,
- u8 pdev_idx, const u8 *addr)
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
- if (peer->pdev_idx != pdev_idx)
+ if (peer->vdev_id != vdev_id)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
@@ -52,14 +51,13 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
- list_for_each_entry(peer, &ab->peers, list) {
- if (!ether_addr_equal(peer->addr, addr))
- continue;
+ if (!ab->rhead_peer_addr)
+ return NULL;
- return peer;
- }
+ peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
+ ab->rhash_peer_addr_param);
- return NULL;
+ return peer;
}
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
@@ -69,11 +67,13 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
- list_for_each_entry(peer, &ab->peers, list)
- if (peer_id == peer->peer_id)
- return peer;
+ if (!ab->rhead_peer_id)
+ return NULL;
- return NULL;
+ peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
+ ab->rhash_peer_id_param);
+
+ return peer;
}
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
@@ -99,7 +99,7 @@ void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
spin_lock_bh(&ab->base_lock);
- peer = ath11k_peer_find_by_id(ab, peer_id);
+ peer = ath11k_peer_find_list_by_id(ab, peer_id);
if (!peer) {
ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
peer_id);
@@ -167,6 +167,76 @@ static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
return 0;
}
+static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params,
+ void *key)
+{
+ struct ath11k_peer *tmp;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
+
+ if (!tmp)
+ return 0;
+ else if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ else
+ return -EEXIST;
+}
+
+static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ ret = rhashtable_remove_fast(rtbl, rhead, *params);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ return 0;
+}
+
+static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param, &peer->peer_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param, &peer->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ goto err_clean;
+ }
+
+ return 0;
+
+err_clean:
+ ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ return ret;
+}
+
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_peer *peer, *tmp;
@@ -174,6 +244,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
lockdep_assert_held(&ar->conf_mutex);
+ mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
@@ -182,12 +253,14 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
+ ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
}
static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
@@ -217,17 +290,38 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
return 0;
}
-int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
+ struct ath11k_peer *peer;
+ struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, addr);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ ath11k_warn(ab,
+ "failed to find peer vdev_id %d addr %pM in delete\n",
+ vdev_id, addr);
+ return -EINVAL;
+ }
+
+ ath11k_peer_rhash_delete(ab, peer);
+
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
- ath11k_warn(ar->ab,
+ ath11k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
@@ -237,6 +331,19 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
if (ret)
return ret;
+ return 0;
+}
+
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = __ath11k_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
ar->num_peers--;
return 0;
@@ -263,7 +370,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
}
spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
+ peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) {
spin_unlock_bh(&ar->ab->base_lock);
return -EINVAL;
@@ -283,11 +390,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
if (ret)
return ret;
+ mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
@@ -295,6 +404,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
goto cleanup;
}
+ ret = ath11k_peer_rhash_add(ar->ab, peer);
+ if (ret) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ goto cleanup;
+ }
+
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
@@ -319,26 +435,213 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
return 0;
cleanup:
- reinit_completion(&ar->peer_delete_done);
+ fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
+ if (fbret)
+ ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
+ param->peer_addr, param->vdev_id, fbret);
- fbret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
- param->vdev_id);
- if (fbret) {
- ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- param->vdev_id, param->peer_addr);
- goto exit;
+ return ret;
+}
+
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
}
- fbret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
- param->peer_addr);
- if (fbret)
- ath11k_warn(ar->ab, "failed wait for peer %pM delete done id %d fallback ret %d\n",
- param->peer_addr, param->vdev_id, fbret);
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_id_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_id)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_id);
+ rhash_id_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_id_tbl) {
+ ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_id_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, peer_id);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_id);
+ param->key_len = sizeof_field(struct ath11k_peer, peer_id);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_id_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_id) {
+ ab->rhead_peer_id = rhash_id_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_id_tbl);
+err_free:
+ kfree(rhash_id_tbl);
-exit:
return ret;
}
+
+static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_addr_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_addr)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_addr);
+ rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_addr_tbl) {
+ ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_addr_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, addr);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
+ param->key_len = sizeof_field(struct ath11k_peer, addr);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_addr_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_addr) {
+ ab->rhead_peer_addr = rhash_addr_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_addr_tbl);
+err_free:
+ kfree(rhash_addr_tbl);
+
+ return ret;
+}
+
+static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_id);
+ kfree(ab->rhead_peer_id);
+ ab->rhead_peer_id = NULL;
+}
+
+static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_addr)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_addr);
+ kfree(ab->rhead_peer_addr);
+ ab->rhead_peer_addr = NULL;
+}
+
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ret = ath11k_peer_rhash_id_tbl_init(ab);
+ if (ret)
+ goto out;
+
+ ret = ath11k_peer_rhash_addr_tbl_init(ab);
+ if (ret)
+ goto cleanup_tbl;
+
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ return 0;
+
+cleanup_tbl:
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+out:
+ mutex_unlock(&ab->tbl_mtx_lock);
+ return ret;
+}
+
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ath11k_peer_rhash_addr_tbl_destroy(ab);
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+
+ mutex_unlock(&ab->tbl_mtx_lock);
+}
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index 63fe5665badf..6dd17bafe3a0 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_PEER_H
@@ -20,6 +21,11 @@ struct ath11k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+ /* peer id based rhashtable list pointer */
+ struct rhash_head rhash_id;
+ /* peer addr based rhashtable list pointer */
+ struct rhash_head rhash_addr;
+
/* Info used in MMIC verification of
* RX fragments
*/
@@ -47,5 +53,7 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
int vdev_id);
-
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab);
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab);
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer);
#endif /* _PEER_H_ */
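The peer.c changes above replace linear list walks with two rhashtables, one keyed by peer_id and one by MAC address, inserting into both and rolling back the first index when the second insert fails. A kernel-style sketch of that two-index pattern, using a hypothetical demo_peer type (not the driver's) and not buildable on its own, could look like:

/* Kernel-style sketch of the two-index rollback pattern; the demo_peer
 * type and names are hypothetical, not taken from the driver.
 */
#include <linux/rhashtable.h>
#include <linux/if_ether.h>

struct demo_peer {
	int peer_id;
	u8 addr[ETH_ALEN];
	struct rhash_head rhash_id;
	struct rhash_head rhash_addr;
};

static const struct rhashtable_params demo_id_params = {
	.key_offset = offsetof(struct demo_peer, peer_id),
	.head_offset = offsetof(struct demo_peer, rhash_id),
	.key_len = sizeof(int),
	.automatic_shrinking = true,
};

static const struct rhashtable_params demo_addr_params = {
	.key_offset = offsetof(struct demo_peer, addr),
	.head_offset = offsetof(struct demo_peer, rhash_addr),
	.key_len = ETH_ALEN,
	.automatic_shrinking = true,
};

/* Insert into both indexes; undo the id insert if the addr insert fails,
 * so the two tables never go out of sync.
 */
static int demo_peer_add(struct rhashtable *by_id, struct rhashtable *by_addr,
			 struct demo_peer *peer)
{
	int ret;

	ret = rhashtable_insert_fast(by_id, &peer->rhash_id, demo_id_params);
	if (ret)
		return ret;

	ret = rhashtable_insert_fast(by_addr, &peer->rhash_addr,
				     demo_addr_params);
	if (ret)
		rhashtable_remove_fast(by_id, &peer->rhash_id, demo_id_params);

	return ret;
}

The driver itself uses rhashtable_lookup_get_insert_fast() so that a duplicate entry is reported as -EEXIST; the plain rhashtable_insert_fast() above keeps the sketch short.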
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 04e966830c18..d1e945074bc1 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -12,9 +13,14 @@
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
+#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
+
+#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
bool ath11k_cold_boot_cal = 1;
EXPORT_SYMBOL(ath11k_cold_boot_cal);
@@ -745,6 +751,68 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
+static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
@@ -1645,7 +1713,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.bdf_support_valid = 1;
req.bdf_support = 1;
- if (ab->bus_params.m3_fw_support) {
+ if (ab->hw_params.m3_fw_support) {
req.m3_support_valid = 1;
req.m3_support = 1;
req.m3_cache_support_valid = 1;
@@ -1674,6 +1742,9 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
}
+ if (ab->hw_params.global_reset)
+ req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n");
ret = qmi_txn_init(&ab->qmi.handle, &txn,
@@ -1728,10 +1799,6 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
req->client_id = QMI_WLANFW_CLIENT_ID;
req->fw_ready_enable_valid = 1;
req->fw_ready_enable = 1;
- req->request_mem_enable_valid = 1;
- req->request_mem_enable = 1;
- req->fw_mem_ready_enable_valid = 1;
- req->fw_mem_ready_enable = 1;
req->cal_done_enable_valid = 1;
req->cal_done_enable = 1;
req->fw_init_done_enable_valid = 1;
@@ -1740,6 +1807,17 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
req->pin_connect_result_enable_valid = 0;
req->pin_connect_result_enable = 0;
+ /* WCN6750 doesn't request DDR memory via QMI;
+ * instead it uses a fixed 12MB reserved memory
+ * region in DDR.
+ */
+ if (!ab->hw_params.fixed_fw_mem) {
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ }
+
ret = qmi_txn_init(handle, &txn,
qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
if (ret < 0)
@@ -1797,7 +1875,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!(ab->bus_params.fixed_mem_region ||
+ if (!(ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem_delayed) {
delayed = true;
@@ -1867,7 +1945,7 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
- if ((ab->bus_params.fixed_mem_region ||
+ if ((ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem[i].iaddr)
iounmap(ab->qmi.target_mem[i].iaddr);
@@ -2001,6 +2079,80 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
return 0;
}
+static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_device_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_device_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ void __iomem *bar_addr_va;
+ int ret;
+
+ /* device info message req is only sent for hybrid bus devices */
+ if (!ab->hw_params.hybrid_bus_type)
+ return 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlfw_device_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_DEVICE_INFO_REQ_V01,
+ QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_device_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi device info request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr_valid || !resp.bar_size_valid) {
+ ath11k_warn(ab, "qmi device info response invalid: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr ||
+ resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) {
+ ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n",
+ resp.bar_addr, resp.bar_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size);
+
+ if (!bar_addr_va) {
+ ath11k_warn(ab, "qmi device info ioremap failed\n");
+ ab->mem_len = 0;
+ ret = -EIO;
+ goto out;
+ }
+
+ ab->mem = bar_addr_va;
+ ab->mem_len = resp.bar_size;
+
+ return 0;
+out:
+ return ret;
+}
+
static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req;
@@ -2008,6 +2160,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
struct qmi_txn txn;
int ret = 0;
int r;
+ char *fw_build_id;
+ int fw_build_id_mask_len;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
@@ -2073,6 +2227,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n");
}
+ fw_build_id = ab->qmi.target.fw_build_id;
+ fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK);
+ if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len))
+ fw_build_id = fw_build_id + fw_build_id_mask_len;
+
ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
@@ -2080,7 +2239,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
ab->qmi.target.fw_version,
ab->qmi.target.fw_build_timestamp,
- ab->qmi.target.fw_build_id);
+ fw_build_id);
+
+ r = ath11k_core_check_smbios(ab);
+ if (r)
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
r = ath11k_core_check_dt(ab);
if (r)
@@ -2107,7 +2270,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
memset(&resp, 0, sizeof(resp));
- if (ab->bus_params.fixed_bdf_addr) {
+ if (ab->hw_params.fixed_bdf_addr) {
bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
if (!bdf_addr) {
ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
@@ -2136,7 +2299,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
req->end = 1;
}
- if (ab->bus_params.fixed_bdf_addr ||
+ if (ab->hw_params.fixed_bdf_addr ||
type == ATH11K_QMI_FILE_TYPE_EEPROM) {
req->data_valid = 0;
req->end = 1;
@@ -2145,7 +2308,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
memcpy(req->data, temp, req->data_len);
}
- if (ab->bus_params.fixed_bdf_addr) {
+ if (ab->hw_params.fixed_bdf_addr) {
if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
bdf_addr += ab->hw_params.fw.cal_offset;
@@ -2184,7 +2347,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
goto err_iounmap;
}
- if (ab->bus_params.fixed_bdf_addr ||
+ if (ab->hw_params.fixed_bdf_addr ||
type == ATH11K_QMI_FILE_TYPE_EEPROM) {
remaining = 0;
} else {
@@ -2197,7 +2360,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
}
err_iounmap:
- if (ab->bus_params.fixed_bdf_addr)
+ if (ab->hw_params.fixed_bdf_addr)
iounmap(bdf_addr);
err_free_req:
@@ -2336,7 +2499,7 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- if (!ab->bus_params.m3_fw_support || !m3_mem->vaddr)
+ if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr)
return;
dma_free_coherent(ab->dev, m3_mem->size,
@@ -2356,7 +2519,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- if (ab->bus_params.m3_fw_support) {
+ if (ab->hw_params.m3_fw_support) {
ret = ath11k_qmi_m3_load(ab);
if (ret) {
ath11k_err(ab, "failed to load m3 firmware: %d", ret);
@@ -2684,27 +2847,6 @@ ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
return 0;
}
-static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
-{
- struct ath11k_base *ab = qmi->ab;
- int ret;
-
- ret = ath11k_qmi_fw_ind_register_send(ab);
- if (ret < 0) {
- ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
- ret);
- return ret;
- }
-
- ret = ath11k_qmi_host_cap_send(ab);
- if (ret < 0) {
- ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
- return ret;
- }
-
- return ret;
-}
-
static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
{
struct ath11k_base *ab = qmi->ab;
@@ -2731,6 +2873,12 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
+ ret = ath11k_qmi_request_device_info(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to request qmi device info: %d\n", ret);
+ return ret;
+ }
+
if (ab->hw_params.supports_regdb)
ath11k_qmi_load_bdf_qmi(ab, true);
@@ -2740,9 +2888,33 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
- ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ return 0;
+}
+
+static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
+ return ret;
+ }
+
+ if (!ab->hw_params.fixed_fw_mem)
+ return ret;
+
+ ret = ath11k_qmi_event_load_bdf(qmi);
if (ret < 0) {
- ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret);
+ ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret);
return ret;
}
@@ -2775,7 +2947,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
- if (ab->bus_params.fixed_mem_region ||
+ if (ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_qmi_assign_target_mem_chunk(ab);
if (ret) {
@@ -2942,8 +3114,18 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH11K_QMI_EVENT_FW_MEM_READY:
ret = ath11k_qmi_event_load_bdf(qmi);
- if (ret < 0)
+ if (ret < 0) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
+
+ ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab,
+ "failed to send qmi m3 info req: %d\n", ret);
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ }
+
break;
case ATH11K_QMI_EVENT_FW_READY:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
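For reference, a minimal standalone sketch (not driver code) of the build-id prefix stripping done in the target-cap hunk of this file: the firmware build id is logged without its well-known prefix when that prefix is present. The mask string below is an assumed placeholder; the real FW_BUILD_ID_MASK is defined elsewhere in the driver.

#include <stdio.h>
#include <string.h>

#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="	/* assumed value, for illustration only */

int main(void)
{
	const char *fw_build_id = "QC_IMAGE_VERSION_STRING=WLAN.HSP.1.1-01234";
	size_t mask_len = strlen(FW_BUILD_ID_MASK);

	/* log only the part after the known prefix, if the prefix is present */
	if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, mask_len))
		fw_build_id += mask_len;

	printf("fw_build_id %s\n", fw_build_id);	/* -> WLAN.HSP.1.1-01234 */
	return 0;
}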
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 61678de56ac7..c24e6995cca3 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
@@ -20,6 +21,7 @@
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750 0x03
#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH11K_QMI_RESP_LEN_MAX 8192
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
@@ -36,6 +38,8 @@
#define ATH11K_FIRMWARE_MODE_OFF 4
#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ)
+#define ATH11K_QMI_DEVICE_BAR_SIZE 0x200000
+
struct ath11k_base;
enum ath11k_qmi_file_type {
@@ -285,10 +289,12 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
char placeholder;
};
-#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
-#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
-#define QMI_WLANFW_CAP_REQ_V01 0x0024
-#define QMI_WLANFW_CAP_RESP_V01 0x0024
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+#define QMI_WLANFW_DEVICE_INFO_REQ_V01 0x004C
+#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN 0
enum qmi_wlanfw_pipedir_enum_v01 {
QMI_WLFW_PIPEDIR_NONE_V01 = 0,
@@ -381,6 +387,18 @@ struct qmi_wlanfw_cap_req_msg_v01 {
char placeholder;
};
+struct qmi_wlanfw_device_info_req_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_device_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u64 bar_addr;
+ u32 bar_size;
+ u8 bar_addr_valid;
+ u8 bar_size_valid;
+};
+
#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 81e11cde31d7..79ac2142317a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -83,6 +83,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
*/
if (ar->ab->hw_params.current_cc_support) {
memcpy(&set_current_param.alpha2, request->alpha2, 2);
+ memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
@@ -102,7 +103,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ar->regdom_set_by_user = true;
}
-int ath11k_reg_update_chan_list(struct ath11k *ar)
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
{
struct ieee80211_supported_band **bands;
struct scan_chan_list_params *params;
@@ -111,7 +112,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret;
+ int i, ret, left;
+
+ if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if (wait &&
+ (ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
@@ -193,11 +219,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
- if (ar->pending_11d) {
- complete(&ar->finish_11d_ch_list);
- ar->pending_11d = false;
- }
-
return ret;
}
@@ -263,15 +284,8 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
}
- if (ar->pending_11d)
- complete(&ar->finish_11d_scan);
-
rtnl_lock();
wiphy_lock(ar->hw->wiphy);
-
- if (ar->pending_11d)
- reinit_completion(&ar->finish_11d_ch_list);
-
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
@@ -282,7 +296,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
if (ar->state == ATH11K_STATE_ON) {
- ret = ath11k_reg_update_chan_list(ar);
+ ret = ath11k_reg_update_chan_list(ar, true);
if (ret)
goto err;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 5fb9dc03a74e..2f284f26378d 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -32,5 +32,5 @@ struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
int ath11k_regd_update(struct ath11k *ar);
-int ath11k_reg_update_chan_list(struct ath11k *ar);
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 2b18871d5f7c..516a7b4cd180 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -212,7 +212,10 @@ static int ath11k_spectral_scan_config(struct ath11k *ar,
return -ENODEV;
arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
+
+ spin_lock_bh(&ar->spectral.lock);
ar->spectral.mode = mode;
+ spin_unlock_bh(&ar->spectral.lock);
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
@@ -843,9 +846,6 @@ static inline void ath11k_spectral_ring_free(struct ath11k *ar)
{
struct ath11k_spectral *sp = &ar->spectral;
- if (!sp->enabled)
- return;
-
ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
}
@@ -897,15 +897,16 @@ void ath11k_spectral_deinit(struct ath11k_base *ab)
if (!sp->enabled)
continue;
- ath11k_spectral_debug_unregister(ar);
- ath11k_spectral_ring_free(ar);
+ mutex_lock(&ar->conf_mutex);
+ ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ mutex_unlock(&ar->conf_mutex);
spin_lock_bh(&sp->lock);
-
- sp->mode = ATH11K_SPECTRAL_DISABLED;
sp->enabled = false;
-
spin_unlock_bh(&sp->lock);
+
+ ath11k_spectral_debug_unregister(ar);
+ ath11k_spectral_ring_free(ar);
}
}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index b4f86c45d81f..1410114d1d5c 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -390,6 +391,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
ab->target_pdev_count++;
+ if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
+ return -EINVAL;
+
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
* band to band for a single radio, need to see how this should be
* handled.
@@ -397,7 +402,9 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
- } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
@@ -407,8 +414,6 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
pdev_cap->nss_ratio_info =
WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
- } else {
- return -EINVAL;
}
/* tx/rx chainmask reported from fw depends on the actual hw chains used,
@@ -2015,7 +2020,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
{
/* setup commonly used values */
arg->scan_req_id = 1;
- arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
@@ -5786,9 +5794,9 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
arvif->bssid,
NULL);
if (!sta) {
- ath11k_warn(ab, "not found station for bssid %pM\n",
- arvif->bssid);
- ret = -EPROTO;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for rssi chain\n",
+ arvif->bssid);
goto exit;
}
@@ -5886,8 +5894,9 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
"wmi stats vdev id %d snr %d\n",
src->vdev_id, src->beacon_snr);
} else {
- ath11k_warn(ab, "not found station for bssid %pM\n",
- arvif->bssid);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for vdev stat\n",
+ arvif->bssid);
}
}
@@ -6350,8 +6359,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const struct wmi_11d_new_cc_ev *ev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
const void **tb;
- int ret;
+ int ret, i;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -6377,6 +6388,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
kfree(tb);
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
queue_work(ab->workqueue, &ab->update_11d_work);
return 0;
@@ -7285,47 +7303,64 @@ static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
rcu_read_unlock();
}
-static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
{
- const void **tb;
const struct wmi_service_available_event *ev;
- int ret;
+ u32 *wmi_ext2_service_bitmap;
int i, j;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return;
- }
+ switch (tag) {
+ case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+ ev = (struct wmi_service_available_event *)ptr;
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
- ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
- if (!ev) {
- ath11k_warn(ab, "failed to fetch svc available ev");
- kfree(tb);
- return;
- }
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ ev->wmi_service_segment_bitmap[0],
+ ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2],
+ ev->wmi_service_segment_bitmap[3]);
+ break;
+ case WMI_TAG_ARRAY_UINT32:
+ wmi_ext2_service_bitmap = (u32 *)ptr;
+ for (i = 0, j = WMI_MAX_EXT_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+ i++) {
+ do {
+ if (wmi_ext2_service_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
- /* TODO: Use wmi_service_segment_offset information to get the service
- * especially when more services are advertised in multiple sevice
- * available events.
- */
- for (i = 0, j = WMI_MAX_SERVICE;
- i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
- i++) {
- do {
- if (ev->wmi_service_segment_bitmap[i] &
- BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
- set_bit(j, ab->wmi_ab.svc_map);
- } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+ wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+ break;
}
+ return 0;
+}
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
- ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
- ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ int ret;
- kfree(tb);
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_services_parser,
+ NULL);
+ if (ret)
+ ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
}
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -7765,6 +7800,56 @@ exit:
kfree(tb);
}
+static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath11k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
+ ev->refresh_cnt);
+ ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
+ NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
+
+ replay_ctr = ev->replay_ctr.word1;
+ replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
+ arvif->rekey_data.replay_ctr = replay_ctr;
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ kfree(tb);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7896,6 +7981,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath11k_wmi_gtk_offload_status_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -8143,7 +8231,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
- if (ab->hw_params.single_pdev_only)
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
@@ -8165,6 +8253,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab)
ath11k_wmi_free_dbring_caps(ab);
}
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable;
+
+ /* Set all modes in case of disable */
+ if (cmd->enable)
+ cmd->hw_filter_bitmap = filter_bitmap;
+ else
+ cmd->hw_filter_bitmap = ((u32)~0U);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ enable, filter_bitmap);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
{
struct wmi_wow_host_wakeup_ind *cmd;
@@ -8235,3 +8356,606 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
}
+
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->is_add = enable;
+ cmd->event_bitmap = (1 << event);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_ADD_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
+ bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_BITMAP_PATTERN_T) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
+ bitmap->pattern_offset = pattern_offset;
+ bitmap->pattern_len = pattern_len;
+ bitmap->bitmask_len = pattern_len;
+ bitmap->pattern_id = pattern_id;
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_DEL_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 *channel_list;
+ size_t len, nlo_list_len, channel_list_len;
+ u8 *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV placeholder for array of structures
+ * nlo_configured_parameters(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV placeholder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = pno->vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = pno->active_max_time;
+ cmd->passive_dwell_time = pno->passive_max_time;
+
+ if (pno->do_passive_scan)
+ cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
+
+ cmd->fast_scan_period = pno->fast_scan_period;
+ cmd->slow_scan_period = pno->slow_scan_period;
+ cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
+ cmd->delay_start_time = pno->delay_start_time;
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
+ ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = pno->uc_networks_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = (struct nlo_configured_parameters *)ptr;
+ for (i = 0; i < cmd->no_of_ssids; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
+
+ nlo_list[i].ssid.valid = true;
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ nlo_list[i].ssid.ssid.ssid_len);
+ ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
+ roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = true;
+ nlo_list[i].rssi_cond.rssi =
+ pno->a_networks[i].rssi_threshold;
+ }
+
+ nlo_list[i].bcast_nw_type.valid = true;
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ pno->a_networks[i].bcast_nw_type;
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = pno->a_networks[0].channel_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = (u32 *)ptr;
+ for (i = 0; i < cmd->num_of_channels; i++)
+ channel_list[i] = pno->a_networks[0].channels[i];
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_STOP;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
+static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_tuple *ns;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = (struct wmi_tlv *)buf_ptr;
+
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = (struct wmi_ns_offload_tuple *)buf_ptr;
+ ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= WMI_NSOL_FLAGS_VALID;
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+ ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
+ ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+ ath11k_ce_byte_swap(ns->target_mac.addr, 8);
+
+ if (ns->target_mac.word0 != 0 ||
+ ns->target_mac.word1 != 0) {
+ ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_tuple *arp;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = (struct wmi_arp_offload_tuple *)buf_ptr;
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = WMI_ARPOL_FLAGS_VALID;
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+ ath11k_ce_byte_swap(arp->target_ipaddr, 4);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct ath11k_arp_ns_offload *offload;
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ offload = &arvif->arp_ns_offload;
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
+ }
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->flags = 0;
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->num_ns_ext_tuples = ns_ext_tuples;
+
+ buf_ptr += sizeof(*cmd);
+
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
+ ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+ int len;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+
+ if (enable) {
+ cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
+
+ /* the lengths of the keys in rekey_data and cmd are equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+ ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
+ } else {
+ cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ int len;
+ struct sk_buff *skb;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, sar_len_aligned, rsvd_len_aligned;
+
+ sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + sar_len_aligned +
+ TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->sar_len = BIOS_SAR_TABLE_LEN;
+ cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, rsvd_len_aligned;
+
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+}
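The ext2 branch of ath11k_wmi_tlv_services_parser() above walks four 32-bit bitmap words and maps each set bit to a global service id in the 256-383 range. Below is a self-contained sketch of that mapping; the constant names and the sample bit are illustrative only, mirroring the values used in the patch.

#include <stdio.h>
#include <stdint.h>

#define SEGMENT_BM_SIZE32	4	/* 4 x u32 words = 128 bits per segment */
#define BITS_PER_WORD		32
#define EXT_SERVICE_BASE	256	/* mirrors WMI_MAX_EXT_SERVICE  */
#define EXT2_SERVICE_MAX	384	/* mirrors WMI_MAX_EXT2_SERVICE */

int main(void)
{
	/* pretend firmware set bit 70 of the ext2 segment, i.e. global
	 * service id 256 + 70 = 326
	 */
	uint32_t bitmap[SEGMENT_BM_SIZE32] = { 0, 0, 0, 0 };

	bitmap[70 / BITS_PER_WORD] |= 1u << (70 % BITS_PER_WORD);

	/* same word/bit walk as the parser: word i covers service ids
	 * EXT_SERVICE_BASE + 32*i .. EXT_SERVICE_BASE + 32*i + 31
	 */
	for (int i = 0, j = EXT_SERVICE_BASE;
	     i < SEGMENT_BM_SIZE32 && j < EXT2_SERVICE_MAX; i++) {
		do {
			if (bitmap[i] & (1u << (j % BITS_PER_WORD)))
				printf("service %d advertised\n", j);
		} while (++j % BITS_PER_WORD);
	}

	return 0;
}

With bit 70 of the segment set, the walk reports service id 326, matching the WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326 entry added in the wmi.h hunk below.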
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 587f42307250..7600e9a52da8 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -13,6 +13,7 @@ struct ath11k_base;
struct ath11k;
struct ath11k_fw_stats;
struct ath11k_fw_dbglog;
+struct ath11k_vif;
#define PSOC_HOST_MAX_NUM_SS (8)
@@ -284,6 +285,11 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_GET_TPC_STATS_CMDID,
+ WMI_PDEV_ENABLE_DURATION_BASED_TX_MODE_SELECTION_CMDID,
+ WMI_PDEV_GET_DPD_STATUS_CMDID,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID,
WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_DELETE_CMDID,
WMI_VDEV_START_REQUEST_CMDID,
@@ -1858,6 +1864,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+ WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
WMI_TAG_MAX
};
@@ -1991,6 +1999,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+ /* The first 128 bits */
WMI_MAX_SERVICE = 128,
WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
@@ -2083,7 +2092,12 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_EXT2_MSG = 220,
WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
- WMI_MAX_EXT_SERVICE
+ /* The second 128 bits */
+ WMI_MAX_EXT_SERVICE = 256,
+ WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
+
+ /* The third 128 bits */
+ WMI_MAX_EXT2_SERVICE = 384
};
enum {
@@ -3088,9 +3102,6 @@ enum scan_dwelltime_adaptive_mode {
SCAN_DWELL_MODE_STATIC = 4
};
-#define WLAN_SCAN_MAX_NUM_SSID 10
-#define WLAN_SCAN_MAX_NUM_BSSID 10
-
#define WLAN_SSID_MAX_LEN 32
struct element_info {
@@ -3105,7 +3116,6 @@ struct wlan_ssid {
#define WMI_IE_BITMAP_SIZE 8
-#define WMI_SCAN_MAX_NUM_SSID 0x0A
/* prefix used by scan requestor ids on the host */
#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
@@ -3113,10 +3123,6 @@ struct wlan_ssid {
/* host cycles through the lower 12 bits to generate ids */
#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
-#define WLAN_SCAN_PARAMS_MAX_SSID 16
-#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
-
/* Values lower than this may be refused by some firmware revisions with a scan
* completion with a timedout reason.
*/
@@ -3312,8 +3318,8 @@ struct scan_req_params {
u32 n_probes;
u32 *chan_list;
u32 notify_scan_events;
- struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
- struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
+ struct wlan_ssid ssid[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_mac_addr bssid_list[WLAN_SCAN_PARAMS_MAX_BSSID];
struct element_info extraie;
struct element_info htcap;
struct element_info vhtcap;
@@ -5377,7 +5383,7 @@ struct ath11k_wmi_base {
struct completion service_ready;
struct completion unified_ready;
- DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE);
wait_queue_head_t tx_credits_wq;
const struct wmi_peer_flags_map *peer_flags;
u32 num_mem_chunks;
@@ -5390,6 +5396,19 @@ struct ath11k_wmi_base {
struct ath11k_targ_cap *targ_cap;
};
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+ u32 hw_filter_bitmap;
+} __packed;
+
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@@ -5534,6 +5553,45 @@ static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
#undef C2S
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
+
+struct wmi_wow_add_del_event_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 is_add;
+ u32 event_bitmap;
+} __packed;
+
struct wmi_wow_enable_cmd {
u32 tlv_header;
u32 enable;
@@ -5546,13 +5604,309 @@ struct wmi_wow_host_wakeup_ind {
u32 reserved;
} __packed;
-struct wmi_wow_ev_arg {
+struct wmi_tlv_wow_event_info {
u32 vdev_id;
u32 flag;
- enum wmi_wow_wake_reason wake_reason;
+ u32 wake_reason;
u32 data_len;
+} __packed;
+
+struct wmi_wow_bitmap_pattern {
+ u32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ u32 pattern_offset;
+ u32 pattern_len;
+ u32 bitmask_len;
+ u32 pattern_id;
+} __packed;
+
+struct wmi_wow_add_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on dot11 declaration without extra IEs as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24G_DEFAULT_CH 1
+#define WMI_PNO_5G_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
+
+struct wmi_nlo_ssid_param {
+ u32 valid;
+ struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+ u32 valid;
+ u32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+ u32 valid;
+ u32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+ u32 valid;
+ u32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+ u32 valid;
+ s32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+ /* TLV tag and len;*/
+ u32 tlv_header;
+ struct wmi_nlo_ssid_param ssid;
+ struct wmi_nlo_enc_param enc_type;
+ struct wmi_nlo_auth_param auth_type;
+ struct wmi_nlo_rssi_param rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+struct wmi_network_type {
+ struct wmi_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
};
+struct wmi_wow_nlo_config_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 fast_scan_max_cycles;
+ u32 active_dwell_time;
+ u32 passive_dwell_time;
+ u32 probe_bundle_size;
+
+ /* ART = IRT */
+ u32 rest_time;
+
+ /* Max value that can be reached after SBM */
+ u32 max_rest_time;
+
+ /* SBM */
+ u32 scan_backoff_multiplier;
+
+ /* SCBM */
+ u32 fast_scan_period;
+
+ /* specific to windows */
+ u32 slow_scan_period;
+
+ u32 no_of_ssids;
+
+ u32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ u32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct wmi_mac_addr mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct wmi_mac_addr mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ u32 ie_bitmap[8];
+
+ /* Number of vendor OUIs. In the TLV vendor_oui[] */
+ u32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ u32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_parameters nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_tuple ns_tuples[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_tuple arp_tuples[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_tuple ns_ext_tuples[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_replayc_cnt {
+ union {
+ u8 counter[GTK_REPLAY_COUNTER_BYTES];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_gtk_offload_status_event {
+ u32 vdev_id;
+ u32 flags;
+ u32 refresh_cnt;
+ struct wmi_replayc_cnt replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+#define BIOS_SAR_TABLE_LEN (22)
+#define BIOS_SAR_RSVD1_LEN (6)
+#define BIOS_SAR_RSVD2_LEN (18)
+
+struct wmi_pdev_set_sar_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sar_len;
+ u32 rsvd_len;
+} __packed;
+
+struct wmi_pdev_set_geo_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 rsvd_len;
+} __packed;
+
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
@@ -5714,4 +6068,24 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN]);
int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
struct ath11k_fw_dbglog *dbglog);
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan);
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id);
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable);
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif);
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val);
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
+
#endif
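struct wmi_replayc_cnt above exposes the GTK replay counter both as raw bytes and as two 32-bit words; ath11k_wmi_gtk_offload_status_event() earlier in this patch folds word1/word0 into a 64-bit value and hands it to mac80211 in big-endian form. A small userspace sketch of that folding and serialization, with made-up values and a manual byte swap standing in for cpu_to_be64():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t word0 = 0x00000001;	/* low 32 bits as reported by firmware  */
	uint32_t word1 = 0x00000000;	/* high 32 bits as reported by firmware */
	uint64_t replay_ctr = ((uint64_t)word1 << 32) | word0;
	uint8_t be[8];

	/* big-endian layout expected by the supplicant, equivalent to
	 * cpu_to_be64() followed by a memcpy in the event handler
	 */
	for (int i = 0; i < 8; i++)
		be[i] = replay_ctr >> (56 - 8 * i);

	printf("replay_ctr = %llu, big-endian bytes:", (unsigned long long)replay_ctr);
	for (int i = 0; i < 8; i++)
		printf(" %02x", be[i]);
	printf("\n");

	return 0;
}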
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index 43c62e99dd0e..9d088cebef03 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -6,11 +6,24 @@
#include <linux/delay.h>
#include "mac.h"
+
+#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
+#include "dp_rx.h"
+
+static const struct wiphy_wowlan_support ath11k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
int ath11k_wow_enable(struct ath11k_base *ab)
{
@@ -71,3 +84,753 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
return 0;
}
+
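+/* Disable every wakeup event and delete every configured pattern on the
+ * given vdev.
+ */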
+static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert an 802.3 format to an 802.11 format.
+ * +------------+-----------+--------+----------------+
+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
+ * +------------+-----------+--------+----------------+
+ * |__ |_______ |____________ |________
+ * | | | |
+ * +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
+ * +--+------------+----+-----------+---------------+-----------+
+ */
+static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
+{
+ u8 hdr_8023_pattern[ETH_HLEN] = {};
+ u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+ u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+ u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+
+ int total_len = old->pkt_offset + old->pattern_len;
+ int hdr_80211_end_offset;
+
+ struct ieee80211_hdr_3addr *new_hdr_pattern =
+ (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+ struct ieee80211_hdr_3addr *new_hdr_mask =
+ (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+ struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+ struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+ int hdr_len = sizeof(*new_hdr_pattern);
+
+ struct rfc1042_hdr *new_rfc_pattern =
+ (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+ struct rfc1042_hdr *new_rfc_mask =
+ (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+ int rfc_len = sizeof(*new_rfc_pattern);
+
+ memcpy(hdr_8023_pattern + old->pkt_offset,
+ old->pattern, ETH_HLEN - old->pkt_offset);
+ memcpy(hdr_8023_bit_mask + old->pkt_offset,
+ old->mask, ETH_HLEN - old->pkt_offset);
+
+ /* Copy destination address */
+ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+ memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+ /* Copy source address */
+ memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+ memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+ /* Copy logical link (SNAP) type */
+ memcpy(&new_rfc_pattern->snap_type,
+ &old_hdr_pattern->h_proto,
+ sizeof(old_hdr_pattern->h_proto));
+ memcpy(&new_rfc_mask->snap_type,
+ &old_hdr_mask->h_proto,
+ sizeof(old_hdr_mask->h_proto));
+
+ /* Compute new pkt_offset */
+ if (old->pkt_offset < ETH_ALEN)
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+ else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr3) -
+ offsetof(struct ethhdr, h_source);
+ else
+ new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+ /* Compute new hdr end offset */
+ if (total_len > ETH_HLEN)
+ hdr_80211_end_offset = hdr_len + rfc_len;
+ else if (total_len > offsetof(struct ethhdr, h_proto))
+ hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+ else if (total_len > ETH_ALEN)
+ hdr_80211_end_offset = total_len - ETH_ALEN +
+ offsetof(struct ieee80211_hdr_3addr, addr3);
+ else
+ hdr_80211_end_offset = total_len +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+
+ new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
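+ /* Copy only the slice of the rebuilt 802.11 header that falls inside
+ * the new offset/length window.
+ */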
+ memcpy((u8 *)new->pattern,
+ hdr_80211_pattern + new->pkt_offset,
+ new->pattern_len);
+ memcpy((u8 *)new->mask,
+ hdr_80211_bit_mask + new->pkt_offset,
+ new->pattern_len);
+
+ if (total_len > ETH_HLEN) {
+ /* Copy frame body */
+ memcpy((u8 *)new->pattern + new->pattern_len,
+ (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+ memcpy((u8 *)new->mask + new->pattern_len,
+ (void *)old->mask + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+
+ new->pattern_len += total_len - ETH_HLEN;
+ }
+}
+
+static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Fill in the per-profile parameters */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
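+ /* Profiles whose SSID also appears in the probe list are flagged as
+ * hidden networks (BCAST_HIDDEN).
+ */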
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ j = 0;
+ while (j < pno->uc_networks_count) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ (memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len) == 0)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ j++;
+ }
+ }
+
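+ /* Scan plan 0 supplies the fast scan period and iteration count; an
+ * optional second plan supplies the slow scan period.
+ */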
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
+
+static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath11k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ struct cfg80211_pkt_pattern new_pattern = {};
+ struct cfg80211_pkt_pattern old_pattern = patterns[i];
+ int j;
+
+ new_pattern.pattern = ath_pattern;
+ new_pattern.mask = ath_bitmask;
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+ /* expand the cfg80211 bit mask (one bit per pattern byte) into a byte-wise mask */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+ old_pattern.mask = bitmask;
+
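+ /* In Native WiFi decap mode the firmware matches on 802.11 frames, so
+ * 802.3-style patterns are either converted or have their offset
+ * shifted past the longer 802.11 header.
+ */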
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ if (patterns[i].pkt_offset < ETH_HLEN) {
+ u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
+
+ memcpy(pattern_ext, old_pattern.pattern,
+ old_pattern.pattern_len);
+ old_pattern.pattern = pattern_ext;
+ ath11k_wow_convert_8023_to_80211(&new_pattern,
+ &old_pattern);
+ } else {
+ new_pattern = old_pattern;
+ new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
+ }
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.mask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_wakeups(struct ath11k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
+{
+ int ret = 0;
+ struct ath11k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (ar->nlo_enabled) {
+ struct wmi_pno_scan_req *pno;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ar->nlo_enabled = false;
+ ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_clean_nlo(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 bitmap;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
+ bitmap,
+ true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disabling rekey offload */
+ if (!enable) {
+ ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath11k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath11k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ath11k_mac_drain_tx(ar);
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_enable(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx pktlog during wow suspend: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ath11k_ce_stop_shadow_timers(ar->ab);
+ ath11k_dp_stop_shadow_timers(ar->ab);
+
+ ath11k_hif_irq_disable(ar->ab);
+ ath11k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath11k_hif_suspend(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath11k_wow_wakeup(ar->ab);
+
+cleanup:
+ ath11k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath11k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_hif_resume(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath11k_hif_ce_irq_enable(ar->ab);
+ ath11k_hif_irq_enable(ar->ab);
+
+ ret = ath11k_dp_rx_pktlog_start(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_wakeup(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
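+ /* If resume failed while the device was up, request a restart by
+ * returning 1; the remaining states cannot be recovered.
+ */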
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH11K_STATE_OFF:
+ case ATH11K_STATE_RESTARTING:
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_wow_init(struct ath11k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath11k_wowlan_support;
+
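+ /* In Native WiFi decap mode the usable pattern length and offset are
+ * reduced to leave room for the 802.3 to 802.11 header expansion.
+ */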
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h
index dabc4ee63cf6..553ba850d910 100644
--- a/drivers/net/wireless/ath/ath11k/wow.h
+++ b/drivers/net/wireless/ath/ath11k/wow.h
@@ -3,8 +3,53 @@
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
*/
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath11k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
#define ATH11K_WOW_RETRY_NUM 3
#define ATH11K_WOW_RETRY_WAIT_MS 200
+#define ATH11K_WOW_PATTERNS 22
+#ifdef CONFIG_PM
+
+int ath11k_wow_init(struct ath11k *ar);
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath11k_wow_op_resume(struct ieee80211_hw *hw);
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
int ath11k_wow_enable(struct ath11k_base *ab);
int ath11k_wow_wakeup(struct ath11k_base *ab);
+
+#else
+
+static inline int ath11k_wow_init(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_enable(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_wakeup(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index e3874421c4c0..1963d3145481 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1538,7 +1538,7 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
queue, n_msg);
/*
- * This is due to unavailabilty of buffers to rx entire data.
+ * This is due to unavailability of buffers to rx entire data.
* Return no error so that free buffers from queue can be used
* to receive partial data.
*/
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index cdefb8e2daf1..9cd12b20b18d 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -98,13 +98,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
- }
-
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ath9k_fill_chanctx_ops();
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index fba5a847c3bb..a8c0e8e2d78c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -301,10 +301,11 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2)
| set11nPktDurRTSCTS(i->rates, 3));
- WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0)
- | set11nRateFlags(i->rates, 1)
- | set11nRateFlags(i->rates, 2)
- | set11nRateFlags(i->rates, 3)
+ WRITE_ONCE(ads->ds_ctl7,
+ set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0)
+ | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1)
+ | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2)
+ | set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate));
WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1));
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index dc24da1ff00b..6ca089f15629 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -177,7 +177,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
int i;
/* Accumulate IQ cal measures for active chains */
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->txchainmask & BIT(i)) {
ah->totalPowerMeasI[i] +=
REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b0a4ca3559fd..16bfcd0a1f6e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3911,7 +3911,7 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
}
/* Test value. if 0 then attenuation is unused. Don't load anything. */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->txchainmask & BIT(i)) {
value = ar9003_hw_atten_chain_get(ah, i, chan);
REG_RMW_FIELD(ah, ext_atten_reg[i],
@@ -4747,7 +4747,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
}
static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
- int mode,
+ bool is2ghz,
int ipier,
int ichain,
int *pfrequency,
@@ -4757,7 +4757,6 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
{
u8 *pCalPier;
struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
- int is2GHz;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
struct ath_common *common = ath9k_hw_common(ah);
@@ -4768,17 +4767,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
return -1;
}
- if (mode) { /* 5GHz */
- if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
- ath_dbg(common, EEPROM,
- "Invalid 5GHz cal pier index, must be less than %d\n",
- AR9300_NUM_5G_CAL_PIERS);
- return -1;
- }
- pCalPier = &(eep->calFreqPier5G[ipier]);
- pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
- is2GHz = 0;
- } else {
+ if (is2ghz) {
if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
ath_dbg(common, EEPROM,
"Invalid 2GHz cal pier index, must be less than %d\n",
@@ -4788,10 +4777,18 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
pCalPier = &(eep->calFreqPier2G[ipier]);
pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
- is2GHz = 1;
+ } else {
+ if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
+ ath_dbg(common, EEPROM,
+ "Invalid 5GHz cal pier index, must be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
+ return -1;
+ }
+ pCalPier = &(eep->calFreqPier5G[ipier]);
+ pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
}
- *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz);
+ *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2ghz);
*pcorrection = pCalPierStruct->refPower;
*ptemperature = pCalPierStruct->tempMeas;
*pvoltage = pCalPierStruct->voltMeas;
@@ -4960,7 +4957,6 @@ tempslope:
static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
{
int ichain, ipier, npier;
- int mode;
int lfrequency[AR9300_MAX_CHAINS],
lcorrection[AR9300_MAX_CHAINS],
ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS],
@@ -4976,12 +4972,12 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
int pfrequency, pcorrection, ptemperature, pvoltage,
pnf_cal, pnf_pwr;
struct ath_common *common = ath9k_hw_common(ah);
+ bool is2ghz = frequency < 4000;
- mode = (frequency >= 4000);
- if (mode)
- npier = AR9300_NUM_5G_CAL_PIERS;
- else
+ if (is2ghz)
npier = AR9300_NUM_2G_CAL_PIERS;
+ else
+ npier = AR9300_NUM_5G_CAL_PIERS;
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
lfrequency[ichain] = 0;
@@ -4990,7 +4986,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* identify best lower and higher frequency calibration measurement */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
for (ipier = 0; ipier < npier; ipier++) {
- if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
+ if (!ar9003_hw_cal_pier_get(ah, is2ghz, ipier, ichain,
&pfrequency, &pcorrection,
&ptemperature, &pvoltage,
&pnf_cal, &pnf_pwr)) {
@@ -5126,13 +5122,13 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
frequency, correction[0], correction[1], correction[2]);
/* Store calibrated noise floor values */
- for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++)
- if (mode) {
- ah->nf_5g.cal[ichain] = nf_cal[ichain];
- ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
- } else {
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++)
+ if (is2ghz) {
ah->nf_2g.cal[ichain] = nf_cal[ichain];
ah->nf_2g.pwr[ichain] = nf_pwr[ichain];
+ } else {
+ ah->nf_5g.cal[ichain] = nf_cal[ichain];
+ ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
}
return 0;
@@ -5449,8 +5445,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- struct ar9300_modal_eep_header *modal_hdr;
u8 targetPowerValT2[ar9300RateSize];
u8 target_power_val_t2_eep[ar9300RateSize];
u8 targetPowerValT2_tpc[ar9300RateSize];
@@ -5465,17 +5459,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
if (ar9003_is_paprd_enabled(ah)) {
- if (IS_CHAN_2GHZ(chan))
- modal_hdr = &eep->modalHeader2G;
- else
- modal_hdr = &eep->modalHeader5G;
-
ah->paprd_ratemask =
- le32_to_cpu(modal_hdr->papdRateMaskHt20) &
+ ar9003_get_paprd_rate_mask_ht20(ah, IS_CHAN_2GHZ(chan)) &
AR9300_PAPRD_RATE_MASK;
ah->paprd_ratemask_ht40 =
- le32_to_cpu(modal_hdr->papdRateMaskHt40) &
+ ar9003_get_paprd_rate_mask_ht40(ah, IS_CHAN_2GHZ(chan)) &
AR9300_PAPRD_RATE_MASK;
paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan);
@@ -5592,30 +5581,40 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz)
return ar9003_modal_header(ah, is2ghz)->spurChans;
}
+u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz)
+{
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt20);
+}
+
+u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz)
+{
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt40);
+}
+
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ bool is2ghz = IS_CHAN_2GHZ(chan);
- if (IS_CHAN_2GHZ(chan))
- return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20),
+ if (is2ghz)
+ return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz),
AR9300_PAPRD_SCALE_1);
else {
if (chan->channel >= 5700)
- return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20),
+ return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz),
AR9300_PAPRD_SCALE_1);
else if (chan->channel >= 5400)
- return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz),
AR9300_PAPRD_SCALE_2);
else
- return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz),
AR9300_PAPRD_SCALE_1);
}
}
static u8 ar9003_get_eepmisc(struct ath_hw *ah)
{
- return ah->eeprom.map4k.baseEepHeader.eepMisc;
+ return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc;
}
const struct eeprom_ops eep_ar9300_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index e8fda54acfe3..f8ae20318302 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -363,6 +363,8 @@ u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
+u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz);
+u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz);
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 5184a0aacfe2..ff8ab58e67d9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -144,10 +144,11 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2)
| set11nPktDurRTSCTS(i->rates, 3));
- WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0)
- | set11nRateFlags(i->rates, 1)
- | set11nRateFlags(i->rates, 2)
- | set11nRateFlags(i->rates, 3)
+ WRITE_ONCE(ads->ctl18,
+ set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0)
+ | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1)
+ | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2)
+ | set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate));
WRITE_ONCE(ads->ctl19, AR_Not_Sounding);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 34e100940284..b2d53b6c0ffd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -21,7 +21,7 @@
void ar9003_paprd_enable(struct ath_hw *ah, bool val)
{
struct ath9k_channel *chan = ah->curchan;
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ bool is2ghz = IS_CHAN_2GHZ(chan);
/*
* 3 bits for modalHeader5G.papdRateMaskHt20
@@ -36,17 +36,17 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
* -- disable PAPRD for lower band 5GHz
*/
- if (IS_CHAN_5GHZ(chan)) {
+ if (!is2ghz) {
if (chan->channel >= UPPER_5G_SUB_BAND_START) {
- if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(30))
val = false;
} else if (chan->channel >= MID_5G_SUB_BAND_START) {
- if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(29))
val = false;
} else {
- if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(28))
val = false;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index daf30f9946b4..dc0e5ea25673 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -523,21 +523,10 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
int synth_freq;
int range = 10;
int freq_offset = 0;
- int mode;
- u8* spurChansPtr;
+ u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
unsigned int i;
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-
- if (IS_CHAN_5GHZ(chan)) {
- spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
- mode = 0;
- }
- else {
- spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
- mode = 1;
- }
- if (spurChansPtr[0] == 0)
+ if (spur_fbin_ptr[0] == 0)
return; /* No spur in the mode */
if (IS_CHAN_HT40(chan)) {
@@ -554,16 +543,18 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
ar9003_hw_spur_ofdm_clear(ah);
- for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
- freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS && spur_fbin_ptr[i]; i++) {
+ freq_offset = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
+ IS_CHAN_2GHZ(chan));
freq_offset -= synth_freq;
if (abs(freq_offset) < range) {
ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
range, synth_freq);
if (AR_SREV_9565(ah) && (i < 4)) {
- freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
- mode);
+ freq_offset =
+ ath9k_hw_fbin2freq(spur_fbin_ptr[i + 1],
+ IS_CHAN_2GHZ(chan));
freq_offset -= synth_freq;
if (abs(freq_offset) < range)
ar9003_hw_spur_ofdm_9565(ah, freq_offset);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index a171dbb29fbb..ad949eb02f3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -720,7 +720,7 @@
#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
-#define AR_CH0_TOP2_XPABIASLVL_S 12
+#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12)
#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index d95cabddce33..1e2a30019fb6 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -36,7 +36,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- if (!an->sta->ht_cap.ht_supported) {
+ if (!an->sta->deflink.ht_cap.ht_supported) {
len = scnprintf(buf, size, "%s\n",
"HT not supported");
goto exit;
@@ -186,7 +186,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
band = ah->curchan->chan->band;
rstats = &an->rx_rate_stats;
- if (!sta->ht_cap.ht_supported)
+ if (!sta->deflink.ht_cap.ht_supported)
goto legacy;
len += scnprintf(buf + len, size - len,
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f06eec99de68..518deb5098a2 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -368,10 +368,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
__skb_queue_head_init(&tx_buf->skb_queue);
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
hif_dev->tx.tx_buf_cnt++;
- }
-
- if (!ret)
+ } else {
TX_STAT_INC(buf_queued);
+ }
return ret;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 72ef319feeda..cfee732a89b1 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -491,7 +491,7 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
ista->index = sta_idx;
tsta.is_vif_sta = 0;
maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
+ sta->deflink.ht_cap.ampdu_factor);
tsta.maxampdu = cpu_to_be16(maxampdu);
} else {
memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
@@ -602,7 +602,7 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
for (i = 0, j = 0; i < sband->n_bitrates; i++) {
- if (sta->supp_rates[sband->band] & BIT(i)) {
+ if (sta->deflink.supp_rates[sband->band] & BIT(i)) {
trate->rates.legacy_rates.rs_rates[j]
= (sband->bitrates[i].bitrate * 2) / 10;
j++;
@@ -610,9 +610,9 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
}
trate->rates.legacy_rates.rs_nrates = j;
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
for (i = 0, j = 0; i < 77; i++) {
- if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
+ if (sta->deflink.ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
trate->rates.ht_rates.rs_rates[j++] = i;
if (j == ATH_HTC_RATE_MAX)
break;
@@ -620,18 +620,18 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
trate->rates.ht_rates.rs_nrates = j;
caps = WLAN_RC_HT_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
caps |= ATH_RC_TX_STBC_FLAG;
- if (sta->ht_cap.mcs.rx_mask[1])
+ if (sta->deflink.ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
- if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
- (conf_is_ht40(&priv->hw->conf)))
+ if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
+ (conf_is_ht40(&priv->hw->conf)))
caps |= WLAN_RC_40_FLAG;
if (conf_is_ht40(&priv->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
caps |= WLAN_RC_SGI_FLAG;
else if (conf_is_ht20(&priv->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
caps |= WLAN_RC_SGI_FLAG;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 6a850a0bfa8a..a23eaca0326d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1016,6 +1016,14 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
goto rx_next;
}
+ if (rxstatus->rs_keyix >= ATH_KEYMAX &&
+ rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID) {
+ ath_dbg(common, ANY,
+ "Invalid keyix, dropping (keyix: %d)\n",
+ rxstatus->rs_keyix);
+ goto rx_next;
+ }
+
/* Get the RX status information */
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index fd6aa49adadf..af44b33814dd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -35,8 +35,10 @@
|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
AR_GI##_index : 0) \
|((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
- AR_STBC##_index : 0) \
- |SM((_series)[_index].ChSel, AR_ChainSel##_index))
+ AR_STBC##_index : 0))
+
+#define set11nChainSel(_series, _index) \
+ (SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define CCK_SIFS_TIME 10
#define CCK_PREAMBLE_BITS 144
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e2791d45f5f5..77144647f4fc 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2048,7 +2048,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_OPERATIONAL:
atid = ath_node_to_tid(an, tid);
atid->baw_size = IEEE80211_MIN_AMPDU_BUF <<
- sta->ht_cap.ampdu_factor;
+ sta->deflink.ht_cap.ampdu_factor;
break;
default:
ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 653e79611830..8983ea6fc727 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -834,8 +834,8 @@
((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
-#define AR_SREV_9100(ah) \
- ((ah->hw_version.macVersion) == AR_SREV_VERSION_9100)
+#define AR_SREV_9100(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9100))
#define AR_SREV_9100_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
@@ -891,7 +891,7 @@
#define AR_SREV_9300_20_OR_LATER(_ah) \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
#define AR_SREV_9300_22(_ah) \
- (AR_SREV_9300(ah) && \
+ (AR_SREV_9300((_ah)) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22))
#define AR_SREV_9330(_ah) \
@@ -994,8 +994,8 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
#define AR_SREV_SOC(_ah) \
- (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(ah) || \
- AR_SREV_9561(ah))
+ (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(_ah) || \
+ AR_SREV_9561(_ah))
/* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index db83cc4ba810..ba16a7f3e23d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1271,7 +1271,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
int phy;
if (!rates[i].count || (rates[i].idx < 0))
- continue;
+ break;
rix = rates[i].idx;
info->rates[i].Tries = rates[i].count;
@@ -1574,10 +1574,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
* in HT IBSS when a beacon with HT-info is received after the station
* has already been added.
*/
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor)) - 1;
- density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
+ sta->deflink.ht_cap.ampdu_factor)) - 1;
+ density = ath9k_parse_mpdudensity(sta->deflink.ht_cap.ampdu_density);
an->mpdudensity = density;
}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 76e84adf57c1..101295162967 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1306,8 +1306,8 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
atomic_set(&sta_info->pending_frames, 0);
- if (sta->ht_cap.ht_supported) {
- if (sta->ht_cap.ampdu_density > 6) {
+ if (sta->deflink.ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ampdu_density > 6) {
/*
* HW does support 16us AMPDU density.
* No HT-Xmit for station.
@@ -1319,7 +1319,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
RCU_INIT_POINTER(sta_info->agg[i], NULL);
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
+ sta_info->ampdu_max_len = 1 << (3 + sta->deflink.ht_cap.ampdu_factor);
sta_info->ht_sta = true;
}
@@ -1335,7 +1335,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
unsigned int i;
bool cleanup = false;
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
sta_info->ht_sta = false;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 1b76f4434c06..514f568d9d07 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1044,8 +1044,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
if (unlikely(!sta || !cvif))
goto err_out;
- factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
- density = sta->ht_cap.ampdu_density;
+ factor = min_t(unsigned int, 1u,
+ sta->deflink.ht_cap.ampdu_factor);
+ density = sta->deflink.ht_cap.ampdu_density;
if (density) {
/*
@@ -1558,6 +1559,9 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
goto out;
}
} while (ar->beacon_enabled && i--);
+
+ /* no entry found in list */
+ return NULL;
}
out:
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 2a1db9756fd5..46a49f0a51b3 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2626,7 +2626,12 @@ enum tx_rate_info {
HAL_TX_RATE_SGI = 0x8,
/* Rate with Long guard interval */
- HAL_TX_RATE_LGI = 0x10
+ HAL_TX_RATE_LGI = 0x10,
+
+ /* VHT rates */
+ HAL_TX_RATE_VHT20 = 0x20,
+ HAL_TX_RATE_VHT40 = 0x40,
+ HAL_TX_RATE_VHT80 = 0x80,
};
struct ani_global_class_a_stats_info {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 95ea7d040d8c..e34d3d0b7082 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -192,70 +192,74 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
+#define DEFINE(s) [s] = #s
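+/* DEFINE(x) expands to the designated initializer "[x] = #x", mapping each
+ * capability value to its enum name string.
+ */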
+
static const char * const wcn36xx_caps_names[] = {
- "MCC", /* 0 */
- "P2P", /* 1 */
- "DOT11AC", /* 2 */
- "SLM_SESSIONIZATION", /* 3 */
- "DOT11AC_OPMODE", /* 4 */
- "SAP32STA", /* 5 */
- "TDLS", /* 6 */
- "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
- "WLANACTIVE_OFFLOAD", /* 8 */
- "BEACON_OFFLOAD", /* 9 */
- "SCAN_OFFLOAD", /* 10 */
- "ROAM_OFFLOAD", /* 11 */
- "BCN_MISS_OFFLOAD", /* 12 */
- "STA_POWERSAVE", /* 13 */
- "STA_ADVANCED_PWRSAVE", /* 14 */
- "AP_UAPSD", /* 15 */
- "AP_DFS", /* 16 */
- "BLOCKACK", /* 17 */
- "PHY_ERR", /* 18 */
- "BCN_FILTER", /* 19 */
- "RTT", /* 20 */
- "RATECTRL", /* 21 */
- "WOW", /* 22 */
- "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */
- "SPECULATIVE_PS_POLL", /* 24 */
- "SCAN_SCH", /* 25 */
- "IBSS_HEARTBEAT_OFFLOAD", /* 26 */
- "WLAN_SCAN_OFFLOAD", /* 27 */
- "WLAN_PERIODIC_TX_PTRN", /* 28 */
- "ADVANCE_TDLS", /* 29 */
- "BATCH_SCAN", /* 30 */
- "FW_IN_TX_PATH", /* 31 */
- "EXTENDED_NSOFFLOAD_SLOT", /* 32 */
- "CH_SWITCH_V1", /* 33 */
- "HT40_OBSS_SCAN", /* 34 */
- "UPDATE_CHANNEL_LIST", /* 35 */
- "WLAN_MCADDR_FLT", /* 36 */
- "WLAN_CH144", /* 37 */
- "NAN", /* 38 */
- "TDLS_SCAN_COEXISTENCE", /* 39 */
- "LINK_LAYER_STATS_MEAS", /* 40 */
- "MU_MIMO", /* 41 */
- "EXTENDED_SCAN", /* 42 */
- "DYNAMIC_WMM_PS", /* 43 */
- "MAC_SPOOFED_SCAN", /* 44 */
- "BMU_ERROR_GENERIC_RECOVERY", /* 45 */
- "DISA", /* 46 */
- "FW_STATS", /* 47 */
- "WPS_PRBRSP_TMPL", /* 48 */
- "BCN_IE_FLT_DELTA", /* 49 */
- "TDLS_OFF_CHANNEL", /* 51 */
- "RTT3", /* 52 */
- "MGMT_FRAME_LOGGING", /* 53 */
- "ENHANCED_TXBD_COMPLETION", /* 54 */
- "LOGGING_ENHANCEMENT", /* 55 */
- "EXT_SCAN_ENHANCED", /* 56 */
- "MEMORY_DUMP_SUPPORTED", /* 57 */
- "PER_PKT_STATS_SUPPORTED", /* 58 */
- "EXT_LL_STAT", /* 60 */
- "WIFI_CONFIG", /* 61 */
- "ANTENNA_DIVERSITY_SELECTION", /* 62 */
+ DEFINE(MCC),
+ DEFINE(P2P),
+ DEFINE(DOT11AC),
+ DEFINE(SLM_SESSIONIZATION),
+ DEFINE(DOT11AC_OPMODE),
+ DEFINE(SAP32STA),
+ DEFINE(TDLS),
+ DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
+ DEFINE(WLANACTIVE_OFFLOAD),
+ DEFINE(BEACON_OFFLOAD),
+ DEFINE(SCAN_OFFLOAD),
+ DEFINE(ROAM_OFFLOAD),
+ DEFINE(BCN_MISS_OFFLOAD),
+ DEFINE(STA_POWERSAVE),
+ DEFINE(STA_ADVANCED_PWRSAVE),
+ DEFINE(AP_UAPSD),
+ DEFINE(AP_DFS),
+ DEFINE(BLOCKACK),
+ DEFINE(PHY_ERR),
+ DEFINE(BCN_FILTER),
+ DEFINE(RTT),
+ DEFINE(RATECTRL),
+ DEFINE(WOW),
+ DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
+ DEFINE(SPECULATIVE_PS_POLL),
+ DEFINE(SCAN_SCH),
+ DEFINE(IBSS_HEARTBEAT_OFFLOAD),
+ DEFINE(WLAN_SCAN_OFFLOAD),
+ DEFINE(WLAN_PERIODIC_TX_PTRN),
+ DEFINE(ADVANCE_TDLS),
+ DEFINE(BATCH_SCAN),
+ DEFINE(FW_IN_TX_PATH),
+ DEFINE(EXTENDED_NSOFFLOAD_SLOT),
+ DEFINE(CH_SWITCH_V1),
+ DEFINE(HT40_OBSS_SCAN),
+ DEFINE(UPDATE_CHANNEL_LIST),
+ DEFINE(WLAN_MCADDR_FLT),
+ DEFINE(WLAN_CH144),
+ DEFINE(NAN),
+ DEFINE(TDLS_SCAN_COEXISTENCE),
+ DEFINE(LINK_LAYER_STATS_MEAS),
+ DEFINE(MU_MIMO),
+ DEFINE(EXTENDED_SCAN),
+ DEFINE(DYNAMIC_WMM_PS),
+ DEFINE(MAC_SPOOFED_SCAN),
+ DEFINE(BMU_ERROR_GENERIC_RECOVERY),
+ DEFINE(DISA),
+ DEFINE(FW_STATS),
+ DEFINE(WPS_PRBRSP_TMPL),
+ DEFINE(BCN_IE_FLT_DELTA),
+ DEFINE(TDLS_OFF_CHANNEL),
+ DEFINE(RTT3),
+ DEFINE(MGMT_FRAME_LOGGING),
+ DEFINE(ENHANCED_TXBD_COMPLETION),
+ DEFINE(LOGGING_ENHANCEMENT),
+ DEFINE(EXT_SCAN_ENHANCED),
+ DEFINE(MEMORY_DUMP_SUPPORTED),
+ DEFINE(PER_PKT_STATS_SUPPORTED),
+ DEFINE(EXT_LL_STAT),
+ DEFINE(WIFI_CONFIG),
+ DEFINE(ANTENNA_DIVERSITY_SELECTION),
};
+#undef DEFINE
+
static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
{
if (x >= ARRAY_SIZE(wcn36xx_caps_names))
@@ -788,7 +792,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
int i, size;
u16 *rates_table;
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
- u32 rates = sta->supp_rates[band];
+ u32 rates = sta->deflink.supp_rates[band];
memset(&sta_priv->supported_rates, 0,
sizeof(sta_priv->supported_rates));
@@ -814,20 +818,20 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
}
}
- if (sta->ht_cap.ht_supported) {
- BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
- sizeof(sta_priv->supported_rates.supported_mcs_set));
+ if (sta->deflink.ht_cap.ht_supported) {
+ BUILD_BUG_ON(sizeof(sta->deflink.ht_cap.mcs.rx_mask) >
+ sizeof(sta_priv->supported_rates.supported_mcs_set));
memcpy(sta_priv->supported_rates.supported_mcs_set,
- sta->ht_cap.mcs.rx_mask,
- sizeof(sta->ht_cap.mcs.rx_mask));
+ sta->deflink.ht_cap.mcs.rx_mask,
+ sizeof(sta->deflink.ht_cap.mcs.rx_mask));
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
sta_priv->supported_rates.op_rate_mode = STA_11ac;
sta_priv->supported_rates.vht_rx_mcs_map =
- sta->vht_cap.vht_mcs.rx_mcs_map;
+ sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
sta_priv->supported_rates.vht_tx_mcs_map =
- sta->vht_cap.vht_mcs.tx_mcs_map;
+ sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}
}
@@ -1400,6 +1404,21 @@ static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct station_info *sinfo)
+{
+ struct wcn36xx *wcn;
+ u8 sta_index;
+ int status;
+
+ wcn = hw->priv;
+ sta_index = get_sta_index(vif, wcn36xx_sta_to_priv(sta));
+ status = wcn36xx_smd_get_stats(wcn, sta_index, HAL_GLOBAL_CLASS_A_STATS_INFO, sinfo);
+
+ if (status)
+ wcn36xx_err("wcn36xx_smd_get_stats failed\n");
+}
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@@ -1423,6 +1442,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add,
.sta_remove = wcn36xx_sta_remove,
+ .sta_statistics = wcn36xx_sta_statistics,
.ampdu_action = wcn36xx_ampdu_action,
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 59ad332156ae..7ac9a1e6f768 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -208,9 +208,9 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
{
if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
- else if (sta && sta->ht_cap.ht_supported)
+ else if (sta && sta->deflink.ht_cap.ht_supported)
bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
- else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f))
+ else if (sta && (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0x7f))
bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
else
bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
@@ -225,9 +225,10 @@ static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
{
- if (sta && sta->ht_cap.ht_supported) {
- unsigned long caps = sta->ht_cap.cap;
- bss_params->ht = sta->ht_cap.ht_supported;
+ if (sta && sta->deflink.ht_cap.ht_supported) {
+ unsigned long caps = sta->deflink.ht_cap.cap;
+
+ bss_params->ht = sta->deflink.ht_cap.ht_supported;
bss_params->tx_channel_width_set = is_cap_supported(caps,
IEEE80211_HT_CAP_SUP_WIDTH_20_40);
bss_params->lsig_tx_op_protection_full_support =
@@ -250,23 +251,24 @@ wcn36xx_smd_set_bss_vht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params_v1 *bss)
{
- if (sta && sta->vht_cap.vht_supported)
+ if (sta && sta->deflink.vht_cap.vht_supported)
bss->vht_capable = 1;
}
static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
- if (sta->ht_cap.ht_supported) {
- unsigned long caps = sta->ht_cap.cap;
- sta_params->ht_capable = sta->ht_cap.ht_supported;
+ if (sta->deflink.ht_cap.ht_supported) {
+ unsigned long caps = sta->deflink.ht_cap.cap;
+
+ sta_params->ht_capable = sta->deflink.ht_cap.ht_supported;
sta_params->tx_channel_width_set = is_cap_supported(caps,
IEEE80211_HT_CAP_SUP_WIDTH_20_40);
sta_params->lsig_txop_protection = is_cap_supported(caps,
IEEE80211_HT_CAP_LSIG_TXOP_PROT);
- sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
- sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+ sta_params->max_ampdu_size = sta->deflink.ht_cap.ampdu_factor;
+ sta_params->max_ampdu_density = sta->deflink.ht_cap.ampdu_density;
/* max_amsdu_size: 1 : 3839 bytes, 0 : 7935 bytes (max) */
sta_params->max_amsdu_size = !is_cap_supported(caps,
IEEE80211_HT_CAP_MAX_AMSDU);
@@ -287,10 +289,10 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
- if (sta->vht_cap.vht_supported) {
- unsigned long caps = sta->vht_cap.cap;
+ if (sta->deflink.vht_cap.vht_supported) {
+ unsigned long caps = sta->deflink.vht_cap.cap;
- sta_params->vht_capable = sta->vht_cap.vht_supported;
+ sta_params->vht_capable = sta->deflink.vht_cap.vht_supported;
sta_params->vht_ldpc_enabled =
is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
@@ -308,9 +310,10 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
static void wcn36xx_smd_set_sta_ht_ldpc_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
sta_params->ht_ldpc_enabled =
- is_cap_supported(sta->ht_cap.cap, IEEE80211_HT_CAP_LDPC_CODING);
+ is_cap_supported(sta->deflink.ht_cap.cap,
+ IEEE80211_HT_CAP_LDPC_CODING);
}
}
@@ -2627,6 +2630,62 @@ out:
return ret;
}
+int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask,
+ struct station_info *sinfo)
+{
+ struct wcn36xx_hal_stats_req_msg msg_body;
+ struct wcn36xx_hal_stats_rsp_msg *rsp;
+ void *rsp_body;
+ int ret;
+
+ if (stats_mask & ~HAL_GLOBAL_CLASS_A_STATS_INFO) {
+ wcn36xx_err("stats_mask 0x%x contains unimplemented types\n",
+ stats_mask);
+ return -EINVAL;
+ }
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_GET_STATS_REQ);
+
+ msg_body.sta_id = sta_index;
+ msg_body.stats_mask = stats_mask;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("sending hal_get_stats failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_get_stats response failed err=%d\n", ret);
+ goto out;
+ }
+
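+ /* Stats blocks follow the fixed response header in the firmware reply;
+ * only the class A stats block is consumed here.
+ */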
+ rsp = (struct wcn36xx_hal_stats_rsp_msg *)wcn->hal_buf;
+ rsp_body = (wcn->hal_buf + sizeof(struct wcn36xx_hal_stats_rsp_msg));
+
+ if (rsp->stats_mask != stats_mask) {
+ wcn36xx_err("stats_mask 0x%x differs from requested 0x%x\n",
+ rsp->stats_mask, stats_mask);
+ goto out;
+ }
+
+ if (rsp->stats_mask & HAL_GLOBAL_CLASS_A_STATS_INFO) {
+ struct ani_global_class_a_stats_info *stats_info = rsp_body;
+
+ wcn36xx_process_tx_rate(stats_info, &sinfo->txrate);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ rsp_body += sizeof(struct ani_global_class_a_stats_info);
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+
+ return ret;
+}
+
static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info)
{
struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate;
@@ -3092,9 +3151,9 @@ static int wcn36xx_smd_gtk_offload_get_info_rsp(struct wcn36xx *wcn,
cpu_to_le64(rsp->key_replay_counter);
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "GTK replay counter increment %llu\n",
- rsp->key_replay_counter);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "GTK replay counter increment %llu\n",
+ rsp->key_replay_counter);
}
wcn36xx_dbg(WCN36XX_DBG_HAL,
@@ -3316,6 +3375,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_ADD_BA_SESSION_RSP:
case WCN36XX_HAL_ADD_BA_RSP:
case WCN36XX_HAL_DEL_BA_RSP:
+ case WCN36XX_HAL_GET_STATS_RSP:
case WCN36XX_HAL_TRIGGER_BA_RSP:
case WCN36XX_HAL_UPDATE_CFG_RSP:
case WCN36XX_HAL_JOIN_RSP:
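The smd.c hunks above follow mac80211's multi-link (MLO) preparation: per-link station data such as the HT/VHT capabilities and supported rates moved into the sta->deflink default-link member. A minimal sketch of the resulting access pattern; the helper name is hypothetical:

#include <net/mac80211.h>

static bool example_sta_supports_ht40(struct ieee80211_sta *sta)
{
	/* Per-link capabilities now live under sta->deflink (the default link). */
	return sta->deflink.ht_cap.ht_supported &&
	       (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
}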
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 957cfa87fbde..3fd598ac2a27 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -138,6 +138,8 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn);
+int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask,
+ struct station_info *sinfo);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index df749b114568..8da3955995b6 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -699,3 +699,32 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
return ret;
}
+
+void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info)
+{
+ /* tx_rate is in units of 500kbps; mac80211 wants them in 100kbps */
+ if (stats->tx_rate_flags & HAL_TX_RATE_LEGACY)
+ info->legacy = stats->tx_rate * 5;
+
+ info->flags = 0;
+ info->mcs = stats->mcs_index;
+ info->nss = 1;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_HT40))
+ info->flags |= RATE_INFO_FLAGS_MCS;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_VHT20 | HAL_TX_RATE_VHT40 | HAL_TX_RATE_VHT80))
+ info->flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (stats->tx_rate_flags & HAL_TX_RATE_SGI)
+ info->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_VHT20))
+ info->bw = RATE_INFO_BW_20;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_HT40 | HAL_TX_RATE_VHT40))
+ info->bw = RATE_INFO_BW_40;
+
+ if (stats->tx_rate_flags & HAL_TX_RATE_VHT80)
+ info->bw = RATE_INFO_BW_80;
+}
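wcn36xx_process_tx_rate() scales by 5 because the firmware reports rates in 500 kbps units while struct rate_info stores the legacy rate in 100 kbps units. A small, self-contained illustration; the helper name is hypothetical:

#include <linux/string.h>
#include <net/cfg80211.h>

static void example_fill_legacy_rate(struct rate_info *info, u8 fw_rate)
{
	memset(info, 0, sizeof(*info));
	/* fw_rate is in 500 kbps units; rate_info.legacy is in 100 kbps,
	 * so 108 (54 Mb/s) becomes 540. */
	info->legacy = fw_rate * 5;
}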
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
index b54311ffde9c..fb0d6cabd52b 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.h
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -164,5 +164,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
int wcn36xx_start_tx(struct wcn36xx *wcn,
struct wcn36xx_sta *sta_priv,
struct sk_buff *skb);
+void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info);
#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 764d1d14132b..8f2638f5b87b 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1653,10 +1653,9 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
params->seq_len, params->seq);
return -EINVAL;
}
- }
-
- if (!IS_ERR(cs))
+ } else {
wil_del_rx_key(key_index, key_usage, cs);
+ }
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 0913f0bf60e7..390648066382 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -457,17 +457,17 @@ int wil_if_add(struct wil6210_priv *wil)
if (wil->use_enhanced_dma_hw) {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma,
- WIL6210_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma,
- WIL6210_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
+ NAPI_POLL_WEIGHT);
}
wil_update_net_queues_bh(wil, vif, NULL, true);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index ed4df561e5c5..f521af575e9b 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -445,10 +445,9 @@ int wil_pm_runtime_get(struct wil6210_priv *wil)
int rc;
struct device *dev = wil_to_dev(wil);
- rc = pm_runtime_get_sync(dev);
+ rc = pm_runtime_resume_and_get(dev);
if (rc < 0) {
- wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
- pm_runtime_put_noidle(dev);
+ wil_err(wil, "pm_runtime_resume_and_get() failed, rc = %d\n", rc);
return rc;
}
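pm_runtime_resume_and_get() folds in the error handling the old wil6210 code did by hand. Roughly the open-coded equivalent it replaces, as an illustrative sketch only:

#include <linux/pm_runtime.h>

static int example_runtime_get(struct device *dev)
{
	int rc = pm_runtime_get_sync(dev);

	if (rc < 0) {
		/* get_sync() raises the usage count even when resume fails,
		 * so it has to be dropped again by hand. */
		pm_runtime_put_noidle(dev);
		return rc;
	}
	return 0;
}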
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 11946ecd0b99..22a6eb3e12b7 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -82,7 +82,6 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (20) /* max number of stations */
#define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */
-#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index cf3ccf4ddfe7..aa5c99465674 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -582,7 +582,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
u16 data[4];
s16 gain[2];
u16 minmax[2];
- static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+ static const s16 lna_gain[4] = { -2, 10, 19, 25 };
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index 05404fbd1e70..c1395e622759 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -1123,7 +1123,7 @@ void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev)
struct b43legacy_phy *phy = &dev->phy;
u16 regstack[12] = { 0 };
u16 mls;
- u16 fval;
+ s16 fval;
int i;
int j;
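Both b43 changes are signedness fixes: a negative gain stored in a u16 wraps to a large positive value and corrupts later arithmetic. A tiny sketch using the same table values; the helper name is hypothetical:

static s16 example_gain_offset(s16 gain)
{
	static const s16 lna_gain[4] = { -2, 10, 19, 25 };

	/* With a u16 table the -2 entry would read back as 0xfffe (65534)
	 * and dominate any min/max or offset arithmetic done on it. */
	return gain + lna_gain[0];
}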
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ac02244a6fdf..9c598ea97499 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1119,9 +1119,21 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(sdiodev->func1);
- brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
- sdiodev->wowl_enabled = enabled;
+ /* Power must be preserved to be able to support WOWL. */
+ if (!(pm_caps & MMC_PM_KEEP_POWER))
+ goto notsup;
+
+ if (sdiodev->settings->bus.sdio.oob_irq_supported ||
+ pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
+ sdiodev->wowl_enabled = enabled;
+ brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
+ return;
+ }
+
+notsup:
+ brcmf_dbg(SDIO, "WOWL not supported\n");
}
#ifdef CONFIG_PM_SLEEP
@@ -1130,7 +1142,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct sdio_func *func;
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
- mmc_pm_flag_t pm_caps, sdio_flags;
+ mmc_pm_flag_t sdio_flags;
int ret = 0;
func = container_of(dev, struct sdio_func, dev);
@@ -1142,20 +1154,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- pm_caps = sdio_get_host_pm_caps(func);
-
- if (pm_caps & MMC_PM_KEEP_POWER) {
- /* preserve card power during suspend */
+ if (sdiodev->wowl_enabled) {
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->wowl_enabled) {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
- }
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
@@ -1176,21 +1183,19 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
- mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
int ret = 0;
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- if (!(pm_caps & MMC_PM_KEEP_POWER)) {
+ if (!sdiodev->wowl_enabled) {
/* bus was powered off and device removed, probe again */
ret = brcmf_sdiod_probe(sdiodev);
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
- if (sdiodev->wowl_enabled &&
- sdiodev->settings->bus.sdio.oob_irq_supported)
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
brcmf_sdiod_freezer_off(sdiodev);
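The reshuffled brcmfmac suspend logic reduces to one capability test, now performed when WOWL is configured: the host must keep card power across suspend and provide a wake source, either an out-of-band IRQ or SDIO in-band wakeup. A condensed sketch of that test; the helper name is hypothetical:

#include <linux/mmc/sdio_func.h>

static bool example_wowl_possible(struct sdio_func *func, bool oob_irq_supported)
{
	mmc_pm_flag_t caps = sdio_get_host_pm_caps(func);

	if (!(caps & MMC_PM_KEEP_POWER))
		return false;	/* card power would be lost across suspend */

	/* Need some wake source: an out-of-band IRQ or SDIO in-band wakeup. */
	return oob_irq_supported || (caps & MMC_PM_WAKE_SDIO_IRQ);
}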
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index f0ad1e23f3c8..360b103fe898 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -7481,6 +7481,7 @@ static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
{
switch (drvr->bus_if->chip) {
case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43602_CHIP_ID:
return true;
default:
return false;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index eadac0f5590f..8c741b98d8e5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -868,7 +868,7 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
spin_lock_bh(&wl->lock);
brcms_c_ampdu_tx_operational(wl->wlc, tid, buf_size,
(1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor)) - 1);
+ sta->deflink.ht_cap.ampdu_factor)) - 1);
spin_unlock_bh(&wl->lock);
/* Power save wakeup */
break;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 2ace2b27ecad..5234511dac78 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -3501,7 +3501,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
priv->msg_buffers = NULL;
}
-static ssize_t show_pci(struct device *d, struct device_attribute *attr,
+static ssize_t pci_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(d);
@@ -3521,34 +3521,34 @@ static ssize_t show_pci(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(pci, 0444, show_pci, NULL);
+static DEVICE_ATTR_RO(pci);
-static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+static ssize_t cfg_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->config);
}
-static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
+static DEVICE_ATTR_RO(cfg);
-static ssize_t show_status(struct device *d, struct device_attribute *attr,
+static ssize_t status_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->status);
}
-static DEVICE_ATTR(status, 0444, show_status, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t show_capability(struct device *d, struct device_attribute *attr,
+static ssize_t capability_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->capability);
}
-static DEVICE_ATTR(capability, 0444, show_capability, NULL);
+static DEVICE_ATTR_RO(capability);
#define IPW2100_REG(x) { IPW_ ##x, #x }
static const struct {
@@ -3785,7 +3785,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"),
IPW2100_ORD(UCODE_VERSION, "Ucode Version"),};
-static ssize_t show_registers(struct device *d, struct device_attribute *attr,
+static ssize_t registers_show(struct device *d, struct device_attribute *attr,
char *buf)
{
int i;
@@ -3805,9 +3805,9 @@ static ssize_t show_registers(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(registers, 0444, show_registers, NULL);
+static DEVICE_ATTR_RO(registers);
-static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
+static ssize_t hardware_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3846,9 +3846,9 @@ static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(hardware, 0444, show_hardware, NULL);
+static DEVICE_ATTR_RO(hardware);
-static ssize_t show_memory(struct device *d, struct device_attribute *attr,
+static ssize_t memory_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3905,7 +3905,7 @@ static ssize_t show_memory(struct device *d, struct device_attribute *attr,
return len;
}
-static ssize_t store_memory(struct device *d, struct device_attribute *attr,
+static ssize_t memory_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3940,9 +3940,9 @@ static ssize_t store_memory(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(memory, 0644, show_memory, store_memory);
+static DEVICE_ATTR_RW(memory);
-static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
+static ssize_t ordinals_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3976,9 +3976,9 @@ static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
return len;
}
-static DEVICE_ATTR(ordinals, 0444, show_ordinals, NULL);
+static DEVICE_ATTR_RO(ordinals);
-static ssize_t show_stats(struct device *d, struct device_attribute *attr,
+static ssize_t stats_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3997,7 +3997,7 @@ static ssize_t show_stats(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(stats, 0444, show_stats, NULL);
+static DEVICE_ATTR_RO(stats);
static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
{
@@ -4043,7 +4043,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
return 0;
}
-static ssize_t show_internals(struct device *d, struct device_attribute *attr,
+static ssize_t internals_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4095,9 +4095,9 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
return len;
}
-static DEVICE_ATTR(internals, 0444, show_internals, NULL);
+static DEVICE_ATTR_RO(internals);
-static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
+static ssize_t bssinfo_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4140,7 +4140,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(bssinfo, 0444, show_bssinfo, NULL);
+static DEVICE_ATTR_RO(bssinfo);
#ifdef CONFIG_IPW2100_DEBUG
static ssize_t debug_level_show(struct device_driver *d, char *buf)
@@ -4165,7 +4165,7 @@ static ssize_t debug_level_store(struct device_driver *d,
static DRIVER_ATTR_RW(debug_level);
#endif /* CONFIG_IPW2100_DEBUG */
-static ssize_t show_fatal_error(struct device *d,
+static ssize_t fatal_error_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4190,7 +4190,7 @@ static ssize_t show_fatal_error(struct device *d,
return out - buf;
}
-static ssize_t store_fatal_error(struct device *d,
+static ssize_t fatal_error_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -4199,16 +4199,16 @@ static ssize_t store_fatal_error(struct device *d,
return count;
}
-static DEVICE_ATTR(fatal_error, 0644, show_fatal_error, store_fatal_error);
+static DEVICE_ATTR_RW(fatal_error);
-static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", priv->ieee->scan_age);
}
-static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4232,9 +4232,9 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
return strnlen(buf, count);
}
-static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
+static DEVICE_ATTR_RW(scan_age);
-static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr,
char *buf)
{
/* 0 - RF kill not enabled
@@ -4278,7 +4278,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
return 1;
}
-static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4286,7 +4286,7 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
+static DEVICE_ATTR_RW(rf_kill);
static struct attribute *ipw2100_sysfs_entries[] = {
&dev_attr_hardware.attr,
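The DEVICE_ATTR_RO/RW/ADMIN_* conversions in ipw2100 and ipw2200 rely on a naming convention: the macros derive the callbacks from the attribute name, so the handlers must be named <attr>_show() and <attr>_store(). A minimal sketch with a hypothetical attribute:

#include <linux/kernel.h>
#include <linux/device.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);	/* expects example_show(), mode 0444 */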
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 5727c7c00a28..ed343d4fb9d5 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1259,7 +1259,7 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
return error;
}
-static ssize_t show_event_log(struct device *d,
+static ssize_t event_log_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1289,9 +1289,9 @@ static ssize_t show_event_log(struct device *d,
return len;
}
-static DEVICE_ATTR(event_log, 0444, show_event_log, NULL);
+static DEVICE_ATTR_RO(event_log);
-static ssize_t show_error(struct device *d,
+static ssize_t error_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1326,7 +1326,7 @@ static ssize_t show_error(struct device *d,
return len;
}
-static ssize_t clear_error(struct device *d,
+static ssize_t error_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1337,9 +1337,9 @@ static ssize_t clear_error(struct device *d,
return count;
}
-static DEVICE_ATTR(error, 0644, show_error, clear_error);
+static DEVICE_ATTR_RW(error);
-static ssize_t show_cmd_log(struct device *d,
+static ssize_t cmd_log_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1364,12 +1364,12 @@ static ssize_t show_cmd_log(struct device *d,
return len;
}
-static DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL);
+static DEVICE_ATTR_RO(cmd_log);
#ifdef CONFIG_IPW2200_PROMISCUOUS
static void ipw_prom_free(struct ipw_priv *priv);
static int ipw_prom_alloc(struct ipw_priv *priv);
-static ssize_t store_rtap_iface(struct device *d,
+static ssize_t rtap_iface_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1414,7 +1414,7 @@ static ssize_t store_rtap_iface(struct device *d,
return count;
}
-static ssize_t show_rtap_iface(struct device *d,
+static ssize_t rtap_iface_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -1429,9 +1429,9 @@ static ssize_t show_rtap_iface(struct device *d,
}
}
-static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface);
+static DEVICE_ATTR_ADMIN_RW(rtap_iface);
-static ssize_t store_rtap_filter(struct device *d,
+static ssize_t rtap_filter_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1451,7 +1451,7 @@ static ssize_t store_rtap_filter(struct device *d,
return count;
}
-static ssize_t show_rtap_filter(struct device *d,
+static ssize_t rtap_filter_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -1460,17 +1460,17 @@ static ssize_t show_rtap_filter(struct device *d,
priv->prom_priv ? priv->prom_priv->filter : 0);
}
-static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter);
+static DEVICE_ATTR_ADMIN_RW(rtap_filter);
#endif
-static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", priv->ieee->scan_age);
}
-static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1504,16 +1504,16 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
return len;
}
-static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
+static DEVICE_ATTR_RW(scan_age);
-static ssize_t show_led(struct device *d, struct device_attribute *attr,
+static ssize_t led_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
}
-static ssize_t store_led(struct device *d, struct device_attribute *attr,
+static ssize_t led_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1537,36 +1537,36 @@ static ssize_t store_led(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(led, 0644, show_led, store_led);
+static DEVICE_ATTR_RW(led);
-static ssize_t show_status(struct device *d,
+static ssize_t status_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->status);
}
-static DEVICE_ATTR(status, 0444, show_status, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+static ssize_t cfg_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->config);
}
-static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
+static DEVICE_ATTR_RO(cfg);
-static ssize_t show_nic_type(struct device *d,
+static ssize_t nic_type_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "TYPE: %d\n", priv->nic_type);
}
-static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL);
+static DEVICE_ATTR_RO(nic_type);
-static ssize_t show_ucode_version(struct device *d,
+static ssize_t ucode_version_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 len = sizeof(u32), tmp = 0;
@@ -1578,9 +1578,9 @@ static ssize_t show_ucode_version(struct device *d,
return sprintf(buf, "0x%08x\n", tmp);
}
-static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL);
+static DEVICE_ATTR_RO(ucode_version);
-static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
+static ssize_t rtc_show(struct device *d, struct device_attribute *attr,
char *buf)
{
u32 len = sizeof(u32), tmp = 0;
@@ -1592,20 +1592,20 @@ static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
return sprintf(buf, "0x%08x\n", tmp);
}
-static DEVICE_ATTR(rtc, 0644, show_rtc, NULL);
+static DEVICE_ATTR_RO(rtc);
/*
* Add a device attribute to view/control the delay between eeprom
* operations.
*/
-static ssize_t show_eeprom_delay(struct device *d,
+static ssize_t eeprom_delay_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *p = dev_get_drvdata(d);
int n = p->eeprom_delay;
return sprintf(buf, "%i\n", n);
}
-static ssize_t store_eeprom_delay(struct device *d,
+static ssize_t eeprom_delay_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1614,9 +1614,9 @@ static ssize_t store_eeprom_delay(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay);
+static DEVICE_ATTR_RW(eeprom_delay);
-static ssize_t show_command_event_reg(struct device *d,
+static ssize_t command_event_reg_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1625,7 +1625,7 @@ static ssize_t show_command_event_reg(struct device *d,
reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_command_event_reg(struct device *d,
+static ssize_t command_event_reg_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1637,10 +1637,9 @@ static ssize_t store_command_event_reg(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(command_event_reg, 0644,
- show_command_event_reg, store_command_event_reg);
+static DEVICE_ATTR_RW(command_event_reg);
-static ssize_t show_mem_gpio_reg(struct device *d,
+static ssize_t mem_gpio_reg_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1649,7 +1648,7 @@ static ssize_t show_mem_gpio_reg(struct device *d,
reg = ipw_read_reg32(p, 0x301100);
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_mem_gpio_reg(struct device *d,
+static ssize_t mem_gpio_reg_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1661,9 +1660,9 @@ static ssize_t store_mem_gpio_reg(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, store_mem_gpio_reg);
+static DEVICE_ATTR_RW(mem_gpio_reg);
-static ssize_t show_indirect_dword(struct device *d,
+static ssize_t indirect_dword_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1676,7 +1675,7 @@ static ssize_t show_indirect_dword(struct device *d,
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_indirect_dword(struct device *d,
+static ssize_t indirect_dword_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1687,10 +1686,9 @@ static ssize_t store_indirect_dword(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(indirect_dword, 0644,
- show_indirect_dword, store_indirect_dword);
+static DEVICE_ATTR_RW(indirect_dword);
-static ssize_t show_indirect_byte(struct device *d,
+static ssize_t indirect_byte_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u8 reg = 0;
@@ -1703,7 +1701,7 @@ static ssize_t show_indirect_byte(struct device *d,
return sprintf(buf, "0x%02x\n", reg);
}
-static ssize_t store_indirect_byte(struct device *d,
+static ssize_t indirect_byte_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1714,10 +1712,9 @@ static ssize_t store_indirect_byte(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(indirect_byte, 0644,
- show_indirect_byte, store_indirect_byte);
+static DEVICE_ATTR_RW(indirect_byte);
-static ssize_t show_direct_dword(struct device *d,
+static ssize_t direct_dword_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1730,7 +1727,7 @@ static ssize_t show_direct_dword(struct device *d,
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_direct_dword(struct device *d,
+static ssize_t direct_dword_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1741,7 +1738,7 @@ static ssize_t store_direct_dword(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword);
+static DEVICE_ATTR_RW(direct_dword);
static int rf_kill_active(struct ipw_priv *priv)
{
@@ -1756,7 +1753,7 @@ static int rf_kill_active(struct ipw_priv *priv)
return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
}
-static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr,
char *buf)
{
/* 0 - RF kill not enabled
@@ -1802,7 +1799,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
return 1;
}
-static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1812,9 +1809,9 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
+static DEVICE_ATTR_RW(rf_kill);
-static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
+static ssize_t speed_scan_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1829,7 +1826,7 @@ static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
return sprintf(buf, "0\n");
}
-static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
+static ssize_t speed_scan_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1865,16 +1862,16 @@ static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan);
+static DEVICE_ATTR_RW(speed_scan);
-static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
+static ssize_t net_stats_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
}
-static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
+static ssize_t net_stats_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1886,9 +1883,9 @@ static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats);
+static DEVICE_ATTR_RW(net_stats);
-static ssize_t show_channels(struct device *d,
+static ssize_t channels_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -1932,7 +1929,7 @@ static ssize_t show_channels(struct device *d,
return len;
}
-static DEVICE_ATTR(channels, 0400, show_channels, NULL);
+static DEVICE_ATTR_ADMIN_RO(channels);
static void notify_wx_assoc_event(struct ipw_priv *priv)
{
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index 36d1e6b2568d..4aec1fce1ae2 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -383,7 +383,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
/* Each fragment may need to have room for encryption
* pre/postfix */
- if (host_encrypt)
+ if (host_encrypt && crypt && crypt->ops)
bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
crypt->ops->extra_mpdu_postfix_len;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index b2478cbe558e..0eaad980c85c 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -354,13 +354,13 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
* after assoc.. */
for (i = sband->n_bitrates - 1; i >= 0; i--) {
- if (sta->supp_rates[sband->band] & (1 << i)) {
+ if (sta->deflink.supp_rates[sband->band] & (1 << i)) {
rs_sta->last_txrate_idx = i;
break;
}
}
- il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+ il->_3945.sta_supp_rates = sta->deflink.supp_rates[sband->band];
/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
if (sband->band == NL80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
@@ -631,7 +631,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
il_sta = NULL;
}
- rate_mask = sta->supp_rates[sband->band];
+ rate_mask = sta->deflink.supp_rates[sband->band];
/* get user max rate if set */
max_rate_idx = fls(txrc->rate_idx_mask) - 1;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 9a491e5db75b..9dd2d890e35f 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -627,7 +627,7 @@ il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
static bool
il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
{
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ return (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
!il->ht.non_gf_sta_present;
}
@@ -970,7 +970,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
lq_sta->last_rate_n_flags = tx_rate;
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta->supp_rates[sband->band])
+ if (sta->deflink.supp_rates[sband->band])
il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
}
@@ -1164,7 +1164,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
s32 rate;
s8 is_green = lq_sta->is_green;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -1182,7 +1182,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
tbl->max_search = IL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1217,7 +1217,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
u8 is_green = lq_sta->is_green;
s32 rate;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
D_RATE("LQ: try to switch to SISO\n");
@@ -1228,7 +1228,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
tbl->max_search = IL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1384,7 +1384,7 @@ il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
struct il_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct il_rate_scale_data *win = &(tbl->win[idx]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz =
(sizeof(struct il_scale_tbl_info) -
(sizeof(struct il_rate_scale_data) * RATE_COUNT));
@@ -1507,7 +1507,7 @@ il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
struct il_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct il_rate_scale_data *win = &(tbl->win[idx]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz =
(sizeof(struct il_scale_tbl_info) -
(sizeof(struct il_rate_scale_data) * RATE_COUNT));
@@ -1760,7 +1760,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
(info->flags & IEEE80211_TX_CTL_NO_ACK))
return;
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[lq_sta->band];
tid = il4965_rs_tl_add_packet(lq_sta, hdr);
if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
@@ -2271,7 +2271,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
int i, j;
struct ieee80211_hw *hw = il->hw;
struct ieee80211_conf *conf = &il->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct il_station_priv *sta_priv;
struct il_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
@@ -2288,7 +2288,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
win[i]);
lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[sband->band];
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < RATE_COUNT; i++)
il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 683b632981ed..8299d89e7505 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1863,7 +1863,7 @@ EXPORT_SYMBOL(il_send_add_sta);
static void
il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap;
__le32 sta_flags;
if (!sta || !sta_ht_inf->ht_supported)
@@ -1900,7 +1900,7 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
cpu_to_le32((u32) sta_ht_inf->
ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
- if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
sta_flags |= STA_FLG_HT40_EN_MSK;
else
sta_flags &= ~STA_FLG_HT40_EN_MSK;
@@ -5222,7 +5222,7 @@ il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int maxstreams;
maxstreams =
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index b7c8b209bfea..baffa1cbe8fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -1039,7 +1039,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->last_rate_n_flags = tx_rate;
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[sband->band])
+ if (sta && sta->deflink.supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
@@ -1239,7 +1239,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -1294,7 +1294,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -1350,7 +1350,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
@@ -1570,7 +1570,7 @@ static void rs_move_siso_to_other(struct iwl_priv *priv,
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -1740,7 +1740,7 @@ static void rs_move_mimo2_to_other(struct iwl_priv *priv,
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -1908,7 +1908,7 @@ static void rs_move_mimo3_to_other(struct iwl_priv *priv,
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -2212,7 +2212,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[lq_sta->band];
tid = rs_tl_add_packet(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
@@ -2763,7 +2763,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
int i, j;
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct iwl_station_priv *sta_priv;
struct iwl_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
@@ -2781,7 +2781,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[sband->band];
IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
sta_id);
@@ -2798,7 +2798,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/*
* active legacy rates as per supported rates bitmap
*/
- supp = sta->supp_rates[sband->band];
+ supp = sta->deflink.supp_rates[sband->band];
lq_sta->active_legacy_rate = 0;
for_each_set_bit(i, &supp, BITS_PER_LONG)
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 70338bc7bb54..5dd2d43a01d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -1280,7 +1280,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
break;
}
- ht_cap = &sta->ht_cap;
+ ht_cap = &sta->deflink.ht_cap;
need_multiple = true;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 8f7a0f36c276..476068c0abb7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -139,7 +139,7 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
if (!sta)
return true;
- return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ return sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -147,7 +147,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
__le32 *flags, __le32 *mask)
{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap;
*mask = STA_FLG_RTS_MIMO_PROT_MSK |
STA_FLG_MIMO_DIS_MSK |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 52bf96585fc6..ba538d70985f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -26,7 +26,7 @@ struct iwl_fw_ini_hcmd {
u8 id;
u8 group;
__le16 reserved;
- u8 data[0];
+ u8 data[];
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
@@ -275,7 +275,7 @@ struct iwl_fw_ini_conf_set_tlv {
__le32 time_point;
__le32 set_type;
__le32 addr_offset;
- struct iwl_fw_ini_addr_val addr_val[0];
+ struct iwl_fw_ini_addr_val addr_val[];
} __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 6255257ddebe..0c555089e05f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -240,7 +240,7 @@ struct iwl_mfu_assert_dump_notif {
__le16 index_num;
__le16 parts_num;
__le32 data_size;
- __le32 data[0];
+ __le32 data[];
} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */
/**
@@ -276,7 +276,7 @@ struct iwl_mvm_marker {
u8 marker_id;
__le16 reserved;
__le64 timestamp;
- __le32 metadata[0];
+ __le32 metadata[];
} __packed; /* MARKER_API_S_VER_1 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
index e44c70b7c790..88fe61d144d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -33,7 +33,7 @@ struct iwl_mcast_filter_cmd {
u8 pass_all;
u8 bssid[6];
u8 reserved[2];
- u8 addr_list[0];
+ u8 addr_list[];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_filter_h__ */
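The [0] to [] changes in these iwlwifi headers convert trailing members to C99 flexible array members. A hedged sketch, with illustrative names, of how such a structure is typically declared and allocated with struct_size():

#include <linux/types.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_cmd {
	__le16 type;
	__le16 len;
	u8 addr_list[];		/* flexible array member, was addr_list[0] */
} __packed;

static struct example_cmd *example_alloc(size_t n_addrs)
{
	struct example_cmd *cmd;

	/* struct_size() = sizeof(*cmd) + n_addrs * sizeof(u8), overflow-checked */
	cmd = kzalloc(struct_size(cmd, addr_list, n_addrs), GFP_KERNEL);
	return cmd;
}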
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5413087ae909..5543d9cb74c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1165,7 +1165,7 @@ struct iwl_scan_offload_profiles_query_v1 {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match_v1 matches[0];
+ struct iwl_scan_offload_profile_match_v1 matches[];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
/**
@@ -1209,7 +1209,7 @@ struct iwl_scan_offload_profiles_query {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match matches[0];
+ struct iwl_scan_offload_profile_match matches[];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index 5edbe27c0922..d62fed543276 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -477,7 +477,7 @@ struct iwl_mvm_wep_key_cmd {
u8 decryption_type;
u8 flags;
u8 reserved;
- struct iwl_mvm_wep_key wep_key[0];
+ struct iwl_mvm_wep_key wep_key[];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index 14d35000abed..893438aadab0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -132,7 +132,7 @@ struct iwl_tdls_config_cmd {
__le32 pti_req_data_offset;
struct iwl_tx_cmd pti_req_tx_cmd;
- u8 pti_req_template[0];
+ u8 pti_req_template[];
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 079fa0023bd8..c62576e442bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -84,7 +84,7 @@ struct iwl_fw_error_dump_data {
struct iwl_fw_error_dump_file {
__le32 barker;
__le32 file_len;
- u8 data[0];
+ u8 data[];
} __packed;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 5679a78758be..a7817d952022 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -145,7 +145,7 @@ struct iwl_tlv_ucode_header {
* Note that each TLV is padded to a length
* that is a multiple of 4 for alignment.
*/
- u8 data[0];
+ u8 data[];
};
/*
@@ -603,7 +603,7 @@ struct iwl_fw_dbg_dest_tlv_v1 {
__le32 wrap_count;
u8 base_shift;
u8 end_shift;
- struct iwl_fw_dbg_reg_op reg_ops[0];
+ struct iwl_fw_dbg_reg_op reg_ops[];
} __packed;
/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
@@ -623,14 +623,14 @@ struct iwl_fw_dbg_dest_tlv {
__le32 wrap_count;
u8 base_shift;
u8 size_shift;
- struct iwl_fw_dbg_reg_op reg_ops[0];
+ struct iwl_fw_dbg_reg_op reg_ops[];
} __packed;
struct iwl_fw_dbg_conf_hcmd {
u8 id;
u8 reserved;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/**
@@ -705,7 +705,7 @@ struct iwl_fw_dbg_trigger_tlv {
u8 flags;
u8 reserved[5];
- u8 data[0];
+ u8 data[];
} __packed;
#define FW_DBG_START_FROM_ALIVE 0
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
index 11e3009121cc..be1456dea484 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -298,7 +298,7 @@ struct iwl_sap_hdr {
__le16 type;
__le16 len;
__le32 seq_num;
- u8 payload[0];
+ u8 payload[];
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index a995bba0ba81..bcc4ed20fe5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -915,7 +915,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
wowlan_config_cmd->is_11n_connection =
- ap_sta->ht_cap.ht_supported;
+ ap_sta->deflink.ht_cap.ht_supported;
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 784d91281c02..4fda6c3ba9f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1877,8 +1877,8 @@ static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_he_pkt_ext_v2 *pkt_ext)
{
- u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
- u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &sta->deflink.he_cap.ppe_thres[0];
u8 ru_index_bitmap =
u8_get_bits(*ppe,
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
@@ -1993,7 +1993,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
return;
}
- if (!sta->he_cap.has_he) {
+ if (!sta->deflink.he_cap.has_he) {
rcu_read_unlock();
return;
}
@@ -2005,17 +2005,17 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
flags |= STA_CTXT_HE_RU_2MHZ_BLOCK;
/* HTC flags */
- if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_HTC_HE)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
- if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] &
IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
- (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
u8 link_adap =
- ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
- (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] &
IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
if (link_adap == 2)
@@ -2025,12 +2025,12 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sta_ctxt_cmd.htc_flags |=
cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
}
- if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
- if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] &
IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
- if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
/*
@@ -2041,7 +2041,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sizeof(sta_ctxt_cmd.pkt_ext));
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
- if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
&sta_ctxt_cmd.pkt_ext);
@@ -2050,7 +2050,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
* according to Common Nominal Packet Padding fiels. */
} else {
u8 nominal_padding =
- u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+ u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9],
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
@@ -2058,11 +2058,11 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
&flags);
}
- if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP)
flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
- if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_ACK_EN)
flags |= STA_CTXT_HE_ACK_ENABLED;
@@ -3157,7 +3157,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
}
if (vif->type == NL80211_IFTYPE_STATION)
- vif->bss_conf.he_support = sta->he_cap.has_he;
+ vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
if (sta->tdls &&
(vif->p2p ||
@@ -3189,17 +3189,17 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
if (vif->type == NL80211_IFTYPE_AP) {
- vif->bss_conf.he_support = sta->he_cap.has_he;
+ vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
- vif->bss_conf.he_support = sta->he_cap.has_he;
+ vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
mvmvif->he_ru_2mhz_block = false;
- if (sta->he_cap.has_he)
+ if (sta->deflink.he_cap.has_he)
iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 9830d2663689..d8c3d7ff4f44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -11,7 +11,7 @@
static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
{
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
return IWL_TLC_MNG_CH_WIDTH_160MHZ;
case IEEE80211_STA_RX_BW_80:
@@ -38,9 +38,9 @@ static u8 rs_fw_set_active_chains(u8 chains)
static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
u8 supp = 0;
if (he_cap->has_he)
@@ -62,9 +62,9 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband)
{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
bool vht_ena = vht_cap->vht_supported;
u16 flags = 0;
@@ -136,7 +136,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
{
u16 supp;
int i, highest_mcs;
- u8 max_nss = sta->rx_nss;
+ u8 max_nss = sta->deflink.rx_nss;
struct ieee80211_vht_cap ieee_vht_cap = {
.vht_cap_info = cpu_to_le32(vht_cap->cap),
.supp_mcs = vht_cap->vht_mcs,
@@ -154,7 +154,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
continue;
supp = BIT(highest_mcs + 1) - 1;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
@@ -163,7 +163,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
* configuration is supported - only for MCS 0 since we already
* decoded the MCS bits anyway ourselves.
*/
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160 &&
ieee80211_get_vht_max_nss(&ieee_vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
0, true, nss) >= nss)
@@ -194,7 +194,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd_v4 *cmd)
{
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
u16 tx_mcs_80 =
@@ -202,7 +202,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
u16 tx_mcs_160 =
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
int i;
- u8 nss = sta->rx_nss;
+ u8 nss = sta->deflink.rx_nss;
/* the station supports only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -245,12 +245,12 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
int i;
u16 supp = 0;
unsigned long tmp; /* must be unsigned long for for_each_set_bit */
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
/* non HT rates */
- tmp = sta->supp_rates[sband->band];
+ tmp = sta->deflink.supp_rates[sband->band];
for_each_set_bit(i, &tmp, BITS_PER_LONG)
supp |= BIT(sband->bitrates[i].hw_value);
@@ -378,11 +378,11 @@ out:
u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
if (mvmsta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
- switch (le16_get_bits(sta->he_6ghz_capa.capa,
+ switch (le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
return IEEE80211_MAX_MPDU_LEN_VHT_11454;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 62114616317c..974eeecc9153 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -135,7 +135,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- if (!sta->ht_cap.ht_supported)
+ if (!sta->deflink.ht_cap.ht_supported)
return false;
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -157,7 +157,7 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- if (!sta->ht_cap.ht_supported)
+ if (!sta->deflink.ht_cap.ht_supported)
return false;
return true;
@@ -167,8 +167,8 @@ static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
if (is_ht20(rate) && (ht_cap->cap &
IEEE80211_HT_CAP_SGI_20))
@@ -1369,13 +1369,13 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
{
- struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->deflink.vht_cap;
struct ieee80211_vht_cap vht_cap = {
.vht_cap_info = cpu_to_le32(sta_vht_cap->cap),
.supp_mcs = sta_vht_cap->vht_mcs,
};
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
/*
* Don't use 160 MHz if VHT extended NSS support
@@ -1388,7 +1388,7 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
if (ieee80211_get_vht_max_nss(&vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
0, true,
- sta->rx_nss) < sta->rx_nss)
+ sta->deflink.rx_nss) < sta->deflink.rx_nss)
return RATE_MCS_CHAN_WIDTH_80;
return RATE_MCS_CHAN_WIDTH_160;
case IEEE80211_STA_RX_BW_80:
@@ -2537,7 +2537,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
* In case of VHT/HT, when the RSSI is low, fall back to the case of
* legacy rates.
*/
- if (sta->vht_cap.vht_supported &&
+ if (sta->deflink.vht_cap.vht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
/*
* In AP mode, when a new station associates, rs is initialized
@@ -2563,14 +2563,15 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
break;
default:
- IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth);
+ IWL_ERR(mvm, "Invalid BW %d\n",
+ sta->deflink.bandwidth);
goto out;
}
active_rate = lq_sta->active_siso_rate;
rate->type = LQ_VHT_SISO;
rate->bw = bw;
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
initial_rates = rs_optimal_rates_ht;
nentries = ARRAY_SIZE(rs_optimal_rates_ht);
@@ -2761,14 +2762,14 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
- sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
continue;
lq_sta->active_siso_rate |= BIT(i);
}
}
- if (sta->rx_nss < 2)
+ if (sta->deflink.rx_nss < 2)
return;
highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
@@ -2779,7 +2780,7 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
- sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
continue;
lq_sta->active_mimo2_rate |= BIT(i);
@@ -2916,8 +2917,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
struct ieee80211_supported_band *sband;
@@ -2953,7 +2954,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/*
* active legacy rates as per supported rates bitmap
*/
- supp = sta->supp_rates[sband->band];
+ supp = sta->deflink.supp_rates[sband->band];
lq_sta->active_legacy_rate = 0;
for_each_set_bit(i, &supp, BITS_PER_LONG)
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
@@ -3246,7 +3247,7 @@ static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta->supp_rates[info->band])
+ if (sta->deflink.supp_rates[info->band])
rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 655da8856c75..693752d8f65b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -106,10 +106,10 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
* capabilities of the AP station, and choose the watermark accordingly.
*/
if (sta) {
- if (sta->ht_cap.ht_supported ||
- sta->vht_cap.vht_supported ||
- sta->he_cap.has_he) {
- switch (sta->rx_nss) {
+ if (sta->deflink.ht_cap.ht_supported ||
+ sta->deflink.vht_cap.vht_supported ||
+ sta->deflink.he_cap.has_he) {
+ switch (sta->deflink.rx_nss) {
case 1:
watermark = SF_W_MARK_SISO;
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index c7f9d3870f21..406f0a50a5bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -86,7 +86,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
}
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_320:
case IEEE80211_STA_RX_BW_160:
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
@@ -98,13 +98,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
fallthrough;
case IEEE80211_STA_RX_BW_20:
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
add_sta_cmd.station_flags |=
cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
break;
}
- switch (sta->rx_nss) {
+ switch (sta->deflink.rx_nss) {
case 1:
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
break;
@@ -134,12 +134,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
add_sta_cmd.station_flags_msk |=
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
- mpdu_dens = sta->ht_cap.ampdu_density;
+ mpdu_dens = sta->deflink.ht_cap.ampdu_density;
}
if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
@@ -147,18 +147,17 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
- mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa,
+ mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
- agg_size = le16_get_bits(sta->he_6ghz_capa.capa,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
- } else
- if (sta->vht_cap.vht_supported) {
- agg_size = sta->vht_cap.cap &
+ agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ agg_size = sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
agg_size >>=
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
- } else if (sta->ht_cap.ht_supported) {
- agg_size = sta->ht_cap.ampdu_factor;
+ } else if (sta->deflink.ht_cap.ht_supported) {
+ agg_size = sta->deflink.ht_cap.ampdu_factor;
}
/* D6.0 10.12.2 A-MPDU length limit rules
@@ -169,8 +168,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* Maximum AMPDU Length Exponent Extension field in its HE
* Capabilities element
*/
- if (sta->he_cap.has_he)
- agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
+ if (sta->deflink.he_cap.has_he)
+ agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3],
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
/* Limit to max A-MPDU supported by FW */
@@ -782,7 +781,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
/* this queue isn't used for traffic (cab_queue) */
if (IS_ERR_OR_NULL(sta)) {
size = IWL_MGMT_QUEUE_SIZE;
- } else if (sta->he_cap.has_he) {
+ } else if (sta->deflink.he_cap.has_he) {
/* support for 256 ba size */
size = IWL_DEFAULT_QUEUE_SIZE_HE;
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 7763037b93ed..8125bb76f59e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -794,7 +794,7 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
int lmac = iwl_mvm_get_lmac_id(mvm->fw, band);
/* For HE redirect to trigger based fifos */
- if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+ if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
ac += 4;
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
@@ -935,7 +935,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* section 8.7.3 NOTE 3).
*/
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- !sta->vht_cap.vht_supported)
+ !sta->deflink.vht_cap.vht_supported)
max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
/* Sub frame header + SNAP + IP header + TCP header + MSS */
@@ -1083,7 +1083,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
- if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
+ if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
return -1;
if (unlikely(ieee80211_is_probe_resp(fc)))
diff --git a/drivers/net/wireless/intersil/orinoco/airport.c b/drivers/net/wireless/intersil/orinoco/airport.c
index 77e6c53040a3..a890bfa0d5cc 100644
--- a/drivers/net/wireless/intersil/orinoco/airport.c
+++ b/drivers/net/wireless/intersil/orinoco/airport.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of_device.h>
#include <asm/pmac_feature.h>
#include "orinoco.h"
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 28bfa7b7b73c..afdf48550588 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2189,7 +2189,7 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
u32 bw = U32_MAX;
enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
#define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break
C(20);
C(40);
@@ -2211,7 +2211,7 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
WARN(bw > hwsim_get_chanwidth(confbw),
"intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
- vif->addr, sta->addr, bw, sta->bandwidth,
+ vif->addr, sta->addr, bw, sta->deflink.bandwidth,
hwsim_get_chanwidth(data->bw), data->bw);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index d2ee6469e67b..3fa25cd64cda 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -303,5 +303,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
+ mutex_lock(&priv->wdev.mtx);
cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
+ mutex_unlock(&priv->wdev.mtx);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 4f3238d2a171..76004bda0c02 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -182,6 +182,9 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
.host_int_rsr_reg = 0x4,
.host_int_status_reg = 0x0C,
.host_int_mask_reg = 0x08,
+ .host_strap_reg = 0xF4,
+ .host_strap_mask = 0x01,
+ .host_strap_value = 0x00,
.status_reg_0 = 0xE8,
.status_reg_1 = 0xE9,
.sdio_int_mask = 0xff,
@@ -283,6 +286,9 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
.host_int_rsr_reg = 0x4,
.host_int_status_reg = 0x0C,
.host_int_mask_reg = 0x08,
+ .host_strap_reg = 0xF4,
+ .host_strap_mask = 0x01,
+ .host_strap_value = 0x00,
.status_reg_0 = 0xE8,
.status_reg_1 = 0xE9,
.sdio_int_mask = 0xff,
@@ -402,6 +408,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.firmware = SD8997_DEFAULT_FW_NAME,
+ .firmware_sdiouart = SD8997_SDIOUART_FW_NAME,
.reg = &mwifiex_reg_sd8997,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
@@ -536,6 +543,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
struct mwifiex_sdio_device *data = (void *)id->driver_data;
card->firmware = data->firmware;
+ card->firmware_sdiouart = data->firmware_sdiouart;
card->reg = data->reg;
card->max_ports = data->max_ports;
card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
@@ -2439,6 +2447,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
int ret;
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
+ const char *firmware = card->firmware;
/* save adapter pointer in card */
card->adapter = adapter;
@@ -2455,7 +2464,18 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return ret;
}
- strcpy(adapter->fw_name, card->firmware);
+ /* Select the correct firmware (sdsd or sdiouart) based on the
+ * strapping option
+ */
+ if (card->firmware_sdiouart) {
+ u8 val;
+
+ mwifiex_read_reg(adapter, card->reg->host_strap_reg, &val);
+ if ((val & card->reg->host_strap_mask) == card->reg->host_strap_value)
+ firmware = card->firmware_sdiouart;
+ }
+ strcpy(adapter->fw_name, firmware);
+
if (card->fw_dump_enh) {
adapter->mem_type_mapping_tbl = generic_mem_type_map;
adapter->num_mem_types = 1;
@@ -3157,3 +3177,4 @@ MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_SDIOUART_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 5648512c9300..28e8f76bdd58 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -39,6 +39,7 @@
#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
+#define SD8997_SDIOUART_FW_NAME "mrvl/sdiouart8997_combo_v4.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
@@ -196,6 +197,9 @@ struct mwifiex_sdio_card_reg {
u8 host_int_rsr_reg;
u8 host_int_status_reg;
u8 host_int_mask_reg;
+ u8 host_strap_reg;
+ u8 host_strap_mask;
+ u8 host_strap_value;
u8 status_reg_0;
u8 status_reg_1;
u8 sdio_int_mask;
@@ -241,6 +245,7 @@ struct sdio_mmc_card {
struct completion fw_done;
const char *firmware;
+ const char *firmware_sdiouart;
const struct mwifiex_sdio_card_reg *reg;
u8 max_ports;
u8 mp_agg_pkt_limit;
@@ -274,6 +279,7 @@ struct sdio_mmc_card {
struct mwifiex_sdio_device {
const char *firmware;
+ const char *firmware_sdiouart;
const struct mwifiex_sdio_card_reg *reg;
u8 max_ports;
u8 mp_agg_pkt_limit;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 864a2ba9efee..36c24d17136c 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -1985,7 +1985,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
txpriority = index;
- if (priv->ap_fw && sta && sta->ht_cap.ht_supported && !eapol_frame &&
+ if (priv->ap_fw && sta && sta->deflink.ht_cap.ht_supported && !eapol_frame &&
ieee80211_is_data_qos(wh->frame_control)) {
tid = qos & 0xf;
mwl8k_tx_count_packet(sta, tid);
@@ -4027,9 +4027,9 @@ mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
cmd->create_params.reset_seq_no_flag = 1;
cmd->create_params.param_info =
- (stream->sta->ht_cap.ampdu_factor &
+ (stream->sta->deflink.ht_cap.ampdu_factor &
IEEE80211_HT_AMPDU_PARM_FACTOR) |
- ((stream->sta->ht_cap.ampdu_density << 2) &
+ ((stream->sta->deflink.ht_cap.ampdu_density << 2) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
cmd->create_params.flags =
@@ -4113,18 +4113,18 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
cmd->stn_id = cpu_to_le16(sta->aid);
cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
- rates = sta->supp_rates[NL80211_BAND_2GHZ];
+ rates = sta->deflink.supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
+ rates = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 5;
cmd->legacy_rates = cpu_to_le32(rates);
- if (sta->ht_cap.ht_supported) {
- cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
- cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
- cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
- cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
- cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
- cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
- ((sta->ht_cap.ampdu_density & 7) << 2);
+ if (sta->deflink.ht_cap.ht_supported) {
+ cmd->ht_rates[0] = sta->deflink.ht_cap.mcs.rx_mask[0];
+ cmd->ht_rates[1] = sta->deflink.ht_cap.mcs.rx_mask[1];
+ cmd->ht_rates[2] = sta->deflink.ht_cap.mcs.rx_mask[2];
+ cmd->ht_rates[3] = sta->deflink.ht_cap.mcs.rx_mask[3];
+ cmd->ht_capabilities_info = cpu_to_le16(sta->deflink.ht_cap.cap);
+ cmd->mac_ht_param_info = (sta->deflink.ht_cap.ampdu_factor & 3) |
+ ((sta->deflink.ht_cap.ampdu_density & 7) << 2);
cmd->is_qos_sta = 1;
}
@@ -4545,16 +4545,16 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
p = &cmd->peer_info;
p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
- p->ht_support = sta->ht_cap.ht_supported;
- p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
- p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
- ((sta->ht_cap.ampdu_density & 7) << 2);
+ p->ht_support = sta->deflink.ht_cap.ht_supported;
+ p->ht_caps = cpu_to_le16(sta->deflink.ht_cap.cap);
+ p->extended_ht_caps = (sta->deflink.ht_cap.ampdu_factor & 3) |
+ ((sta->deflink.ht_cap.ampdu_density & 7) << 2);
if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
- rates = sta->supp_rates[NL80211_BAND_2GHZ];
+ rates = sta->deflink.supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
+ rates = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 5;
legacy_rate_mask_to_array(p->legacy_rates, rates);
- memcpy(p->ht_rates, &sta->ht_cap.mcs, 16);
+ memcpy(p->ht_rates, &sta->deflink.ht_cap.mcs, 16);
p->interop = 1;
p->amsdu_enabled = 0;
@@ -5031,12 +5031,12 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
- ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ];
+ ap_legacy_rates = ap->deflink.supp_rates[NL80211_BAND_2GHZ];
} else {
ap_legacy_rates =
- ap->supp_rates[NL80211_BAND_5GHZ] << 5;
+ ap->deflink.supp_rates[NL80211_BAND_5GHZ] << 5;
}
- memcpy(ap_mcs_rates, &ap->ht_cap.mcs, 16);
+ memcpy(ap_mcs_rates, &ap->deflink.ht_cap.mcs, 16);
rcu_read_unlock();
@@ -5347,7 +5347,7 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
if (ret >= 0) {
MWL8K_STA(sta)->peer_id = ret;
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
MWL8K_STA(sta)->is_ampdu_allowed = true;
ret = 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 17713c821d80..49a511ae8161 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -326,19 +326,21 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
addr = mt7603_wtbl1_addr(idx);
- ampdu_density = sta->ht_cap.ampdu_density;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
val = mt76_rr(dev, addr + 2 * 4);
val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
- val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
- FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
+ val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR,
+ sta->deflink.ht_cap.ampdu_factor) |
+ FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY,
+ sta->deflink.ht_cap.ampdu_density) |
MT_WTBL1_W2_TXS_BAF_REPORT;
- if (sta->ht_cap.cap)
+ if (sta->deflink.ht_cap.cap)
val |= MT_WTBL1_W2_HT;
- if (sta->vht_cap.cap)
+ if (sta->deflink.vht_cap.cap)
val |= MT_WTBL1_W2_VHT;
mt76_wr(dev, addr + 2 * 4, val);
@@ -347,9 +349,9 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
val = mt76_rr(dev, addr + 9 * 4);
val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
MT_WTBL2_W9_SHORT_GI_80);
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
val |= MT_WTBL2_W9_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
val |= MT_WTBL2_W9_SHORT_GI_40;
mt76_wr(dev, addr + 9 * 4, val);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 7cb17bf40e35..51a9b5d60c7a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -610,7 +610,7 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
static void
mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
struct sta_rec_he *he;
struct tlv *tlv;
@@ -698,7 +698,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->he_cap = cpu_to_le32(cap);
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
@@ -750,9 +750,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
u8 mode = 0;
if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
+ ht_cap = &sta->deflink.ht_cap;
+ vht_cap = &sta->deflink.vht_cap;
+ he_cap = &sta->deflink.he_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -801,25 +801,25 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
u16 supp_rates;
/* starec ht */
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
struct sta_rec_ht *ht;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
}
/* starec vht */
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
struct sta_rec_vht *vht;
int len;
len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len);
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}
/* starec uapsd */
@@ -828,11 +828,11 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
if (!is_mt7921(dev))
return;
- if (sta->ht_cap.ht_supported || sta->he_cap.has_he)
+ if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)
mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);
/* starec he */
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
mt76_connac_mcu_sta_he_tlv(skb, sta);
if (band == NL80211_BAND_6GHZ &&
sta_state == MT76_STA_INFO_STATE_ASSOC) {
@@ -841,7 +841,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G,
sizeof(*he_6g_capa));
he_6g_capa = (struct sta_rec_he_6g_capa *)tlv;
- he_6g_capa->capa = sta->he_6ghz_capa.capa;
+ he_6g_capa->capa = sta->deflink.he_6ghz_capa.capa;
}
}
@@ -851,14 +851,14 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
phy->rcpi = rcpi;
phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
- sta->ht_cap.ampdu_factor) |
+ sta->deflink.ht_cap.ampdu_factor) |
FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
- sta->ht_cap.ampdu_density);
+ sta->deflink.ht_cap.ampdu_density);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
- supp_rates = sta->supp_rates[band];
+ supp_rates = sta->deflink.supp_rates[band];
if (band == NL80211_BAND_2GHZ)
supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) |
FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf);
@@ -867,17 +867,18 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
ra_info->legacy = cpu_to_le16(supp_rates);
- if (sta->ht_cap.ht_supported)
- memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask,
+ if (sta->deflink.ht_cap.ht_supported)
+ memcpy(ra_info->rx_mcs_bitmask,
+ sta->deflink.ht_cap.mcs.rx_mask,
HT_MCS_MASK_NUM);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
state = (struct sta_rec_state *)tlv;
state->state = sta_state;
- if (sta->vht_cap.vht_supported) {
- state->vht_opmode = sta->bandwidth;
- state->vht_opmode |= (sta->rx_nss - 1) <<
+ if (sta->deflink.vht_cap.vht_supported) {
+ state->vht_opmode = sta->deflink.bandwidth;
+ state->vht_opmode |= (sta->deflink.rx_nss - 1) <<
IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
}
}
@@ -905,27 +906,27 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct tlv *tlv;
u32 flags = 0;
- if (sta->ht_cap.ht_supported || sta->he_6ghz_capa.capa) {
+ if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_6ghz_capa.capa) {
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
ht->ldpc = ht_ldpc &&
- !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+ !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- if (sta->ht_cap.ht_supported) {
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
+ if (sta->deflink.ht_cap.ht_supported) {
+ ht->af = sta->deflink.ht_cap.ampdu_factor;
+ ht->mm = sta->deflink.ht_cap.ampdu_density;
} else {
- ht->af = le16_get_bits(sta->he_6ghz_capa.capa,
+ ht->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
- ht->mm = le16_get_bits(sta->he_6ghz_capa.capa,
+ ht->mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
}
ht->ht = true;
}
- if (sta->vht_cap.vht_supported || sta->he_6ghz_capa.capa) {
+ if (sta->deflink.vht_cap.vht_supported || sta->deflink.he_6ghz_capa.capa) {
struct wtbl_vht *vht;
u8 af;
@@ -934,18 +935,18 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
sta_wtbl);
vht = (struct wtbl_vht *)tlv;
vht->ldpc = vht_ldpc &&
- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = true;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
+ sta->deflink.vht_cap.cap);
if (ht)
ht->af = max(ht->af, af);
}
mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
- if (is_connac_v1(dev) && sta->ht_cap.ht_supported) {
+ if (is_connac_v1(dev) && sta->deflink.ht_cap.ht_supported) {
/* sgi */
u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
@@ -955,15 +956,15 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(*raw), wtbl_tlv,
sta_wtbl);
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
flags |= MT_WTBL_W5_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
flags |= MT_WTBL_W5_SHORT_GI_40;
- if (sta->vht_cap.vht_supported) {
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ if (sta->deflink.vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
flags |= MT_WTBL_W5_SHORT_GI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
flags |= MT_WTBL_W5_SHORT_GI_160;
}
raw = (struct wtbl_raw *)tlv;
@@ -1231,9 +1232,9 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
return 0x38;
if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
+ ht_cap = &sta->deflink.ht_cap;
+ vht_cap = &sta->deflink.vht_cap;
+ he_cap = &sta->deflink.he_cap;
} else {
struct ieee80211_supported_band *sband;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 2afad8c76ca6..cf4d4110cc99 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -412,9 +412,9 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
- ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size <<= sta->deflink.ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size - 1);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 4e1ecaec8f4f..e9cab1165f38 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -1017,8 +1017,8 @@ static ssize_t mt7915_sta_fixed_rate_set(struct file *file,
phy.ldpc = (phy.bw || phy.ldpc) * GENMASK(2, 0);
for (i = 0; i <= phy.bw; i++) {
- phy.sgi |= gi << (i << sta->he_cap.has_he);
- phy.he_ltf |= he_ltf << (i << sta->he_cap.has_he);
+ phy.sgi |= gi << (i << sta->deflink.he_cap.has_he);
+ phy.he_ltf |= he_ltf << (i << sta->deflink.he_cap.has_he);
}
field = RATE_PARAM_FIXED;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index e9e7efbf350d..bab70cf981bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1354,7 +1354,7 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
u16 fc, tid;
u32 val;
- if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
+ if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
return;
tid = le32_get_bits(txwi[1], MT_TXD1_TID);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index e7a6f80e7755..df31084e860f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -112,7 +112,7 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
struct mt7915_dev *dev = msta->vif->phy->dev;
enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
- int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
for (nss = 0; nss < max_nss; nss++) {
int mcs;
@@ -152,7 +152,7 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
/* only support 2ss on 160MHz for mt7915 */
if (is_mt7915(&dev->mt76) && nss > 1 &&
- sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
@@ -165,8 +165,8 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_dev *dev = msta->vif->phy->dev;
- u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
- int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+ u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
u16 mcs;
for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
@@ -188,7 +188,7 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
/* only support 2ss on 160MHz for mt7915 */
if (is_mt7915(&dev->mt76) && nss > 1 &&
- sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
}
@@ -197,10 +197,10 @@ static void
mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
const u8 *mask)
{
- int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
for (nss = 0; nss < max_nss; nss++)
- ht_mcs[nss] = sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
+ ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
}
static int
@@ -788,13 +788,13 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
+ struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
struct ieee80211_he_mcs_nss_supp mcs_map;
struct sta_rec_he *he;
struct tlv *tlv;
u32 cap = 0;
- if (!sta->he_cap.has_he)
+ if (!sta->deflink.he_cap.has_he)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
@@ -880,8 +880,8 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
he->he_cap = cpu_to_le32(cap);
- mcs_map = sta->he_cap.he_mcs_nss_supp;
- switch (sta->bandwidth) {
+ mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
@@ -931,7 +931,7 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
+ struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
struct sta_rec_muru *muru;
struct tlv *tlv;
@@ -949,11 +949,11 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
muru->cfg.mimo_ul_en = true;
muru->cfg.ofdma_dl_en = true;
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
muru->mimo_dl.vht_mu_bfee =
- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
- if (!sta->he_cap.has_he)
+ if (!sta->deflink.he_cap.has_he)
return;
muru->mimo_dl.partial_bw_dl_mimo =
@@ -987,13 +987,13 @@ mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_ht *ht;
struct tlv *tlv;
- if (!sta->ht_cap.ht_supported)
+ if (!sta->deflink.ht_cap.ht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
}
static void
@@ -1002,15 +1002,15 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_vht *vht;
struct tlv *tlv;
- if (!sta->vht_cap.vht_supported)
+ if (!sta->deflink.vht_cap.vht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}
static void
@@ -1097,8 +1097,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
if (!bfee && tx_ant < 2)
return false;
- if (sta->he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
if (bfee)
return mvif->cap.he_su_ebfee &&
@@ -1108,8 +1108,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
}
- if (sta->vht_cap.vht_supported) {
- u32 cap = sta->vht_cap.cap;
+ if (sta->deflink.vht_cap.vht_supported) {
+ u32 cap = sta->deflink.vht_cap.cap;
if (bfee)
return mvif->cap.vht_su_ebfee &&
@@ -1135,7 +1135,7 @@ static void
mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
struct sta_rec_bf *bf)
{
- struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
+ struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
u8 n = 0;
bf->tx_mode = MT_PHY_TYPE_HT;
@@ -1160,7 +1160,7 @@ static void
mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
struct sta_rec_bf *bf, bool explicit)
{
- struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
@@ -1181,14 +1181,14 @@ mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = bf->ncol;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->nrow = 1;
} else {
bf->nrow = tx_ant;
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = nss_mcs;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->ibf_nrow = 1;
}
}
@@ -1197,7 +1197,7 @@ static void
mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
struct mt7915_phy *phy, struct sta_rec_bf *bf)
{
- struct ieee80211_sta_he_cap *pc = &sta->he_cap;
+ struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
const struct ieee80211_sta_he_cap *vc =
mt76_connac_get_he_phy_cap(phy->mt76, vif);
@@ -1222,7 +1222,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = bf->ncol;
- if (sta->bandwidth != IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
return;
/* go over for 160MHz and 80p80 */
@@ -1270,7 +1270,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
};
bool ebf;
- if (!(sta->ht_cap.ht_supported || sta->he_cap.has_he))
+ if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
return;
ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
@@ -1284,21 +1284,21 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
* vht: support eBF and iBF
* ht: iBF only, since mac80211 lacks eBF support
*/
- if (sta->he_cap.has_he && ebf)
+ if (sta->deflink.he_cap.has_he && ebf)
mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
- else if (sta->vht_cap.vht_supported)
+ else if (sta->deflink.vht_cap.vht_supported)
mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf);
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
mt7915_mcu_sta_bfer_ht(sta, phy, bf);
else
return;
bf->bf_cap = ebf ? ebf : dev->ibf << 1;
- bf->bw = sta->bandwidth;
- bf->ibf_dbw = sta->bandwidth;
+ bf->bw = sta->deflink.bandwidth;
+ bf->ibf_dbw = sta->deflink.bandwidth;
bf->ibf_nrow = tx_ant;
- if (!ebf && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
bf->ibf_timeout = 0x48;
else
bf->ibf_timeout = 0x18;
@@ -1308,7 +1308,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
else
bf->mem_20m = matrix[bf->nrow][bf->ncol];
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
case IEEE80211_STA_RX_BW_80:
bf->mem_total = bf->mem_20m * 2;
@@ -1333,7 +1333,7 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct tlv *tlv;
u8 nrow = 0;
- if (!(sta->vht_cap.vht_supported || sta->he_cap.has_he))
+ if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he))
return;
if (!mt7915_is_ebf_supported(phy, vif, sta, true))
@@ -1342,13 +1342,13 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
- if (sta->he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
pe->phy_cap_info[5]);
- } else if (sta->vht_cap.vht_supported) {
- struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
pc->cap);
@@ -1464,7 +1464,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
do { \
u8 i, gi = mask->control[band]._gi; \
gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
- for (i = 0; i <= sta->bandwidth; i++) { \
+ for (i = 0; i <= sta->deflink.bandwidth; i++) { \
phy.sgi |= gi << (i << (_he)); \
phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
} \
@@ -1476,11 +1476,11 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
} \
} while (0)
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
__sta_phy_bitrate_mask_check(he_mcs, he_gi, 1);
- } else if (sta->vht_cap.vht_supported) {
+ } else if (sta->deflink.vht_cap.vht_supported) {
__sta_phy_bitrate_mask_check(vht_mcs, gi, 0);
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
__sta_phy_bitrate_mask_check(ht_mcs, gi, 0);
} else {
nrates = hweight32(mask->control[band].legacy);
@@ -1514,7 +1514,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
* actual txrate hardware sends out.
*/
addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
- if (sta->he_cap.has_he)
+ if (sta->deflink.he_cap.has_he)
mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
else
mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
@@ -1547,7 +1547,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra *ra;
struct tlv *tlv;
- u32 supp_rate = sta->supp_rates[band];
+ u32 supp_rate = sta->deflink.supp_rates[band];
u32 cap = sta->wme ? STA_CAP_WMM : 0;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
@@ -1557,8 +1557,8 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
ra->auto_rate = true;
ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
ra->channel = chandef->chan->hw_value;
- ra->bw = sta->bandwidth;
- ra->phy.bw = sta->bandwidth;
+ ra->bw = sta->deflink.bandwidth;
+ ra->phy.bw = sta->deflink.bandwidth;
ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
if (supp_rate) {
@@ -1579,22 +1579,22 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
}
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
ra->supp_mode |= MODE_HT;
- ra->af = sta->ht_cap.ampdu_factor;
- ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ ra->af = sta->deflink.ht_cap.ampdu_factor;
+ ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
cap |= STA_CAP_HT;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
cap |= STA_CAP_SGI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
cap |= STA_CAP_SGI_40;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
cap |= STA_CAP_TX_STBC;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
cap |= STA_CAP_RX_STBC;
if (mvif->cap.ht_ldpc &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
cap |= STA_CAP_LDPC;
mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
@@ -1602,37 +1602,37 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
u8 af;
ra->supp_mode |= MODE_VHT;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
+ sta->deflink.vht_cap.cap);
ra->af = max_t(u8, ra->af, af);
cap |= STA_CAP_VHT;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
cap |= STA_CAP_VHT_SGI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
cap |= STA_CAP_VHT_SGI_160;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
cap |= STA_CAP_VHT_TX_STBC;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
if (mvif->cap.vht_ldpc &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
mask->control[band].vht_mcs);
}
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
ra->supp_mode |= MODE_HE;
cap |= STA_CAP_HE;
- if (sta->he_6ghz_capa.capa)
- ra->af = le16_get_bits(sta->he_6ghz_capa.capa,
+ if (sta->deflink.he_6ghz_capa.capa)
+ ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 233998ca4857..b67615487910 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -1023,7 +1023,7 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
u16 fc, tid;
u32 val;
- if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
+ if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
return;
tid = le32_get_bits(txwi[1], MT_TXD1_TID);
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index d2ee1aaa3c81..ca9cf628eb10 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -385,7 +385,7 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
msta = container_of(wcid, struct mt76_sta, wcid);
sta = container_of(msta, struct ieee80211_sta, drv_priv);
- min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ min_factor = min(min_factor, sta->deflink.ht_cap.ampdu_factor);
}
rcu_read_unlock();
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index f3dff8319a4c..f1fa0442a57f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -163,7 +163,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
- ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size <<= sta->deflink.ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
@@ -172,7 +172,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
txwi->flags =
cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density));
+ sta->deflink.ht_cap.ampdu_density));
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->flags = 0;
}
diff --git a/drivers/net/wireless/purelifi/Kconfig b/drivers/net/wireless/purelifi/Kconfig
new file mode 100644
index 000000000000..e39afec3dcae
--- /dev/null
+++ b/drivers/net/wireless/purelifi/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WLAN_VENDOR_PURELIFI
+ bool "pureLiFi devices"
+ default y
+ help
+ If you have a pureLiFi device, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_PURELIFI
+
+source "drivers/net/wireless/purelifi/plfxlc/Kconfig"
+
+endif # WLAN_VENDOR_PURELIFI
diff --git a/drivers/net/wireless/purelifi/Makefile b/drivers/net/wireless/purelifi/Makefile
new file mode 100644
index 000000000000..0bd6880b022c
--- /dev/null
+++ b/drivers/net/wireless/purelifi/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PLFXLC) := plfxlc/
diff --git a/drivers/net/wireless/purelifi/plfxlc/Kconfig b/drivers/net/wireless/purelifi/plfxlc/Kconfig
new file mode 100644
index 000000000000..4e0be27a5e0e
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config PLFXLC
+ tristate "pureLiFi X, XL, XC device support"
+ depends on CFG80211 && MAC80211 && USB
+ help
+ This option adds support for pureLiFi LiFi wireless USB
+ adapters. The pureLiFi X, XL, XC USB devices are based on the
+ 802.11 OFDM PHY but use light as the transmission medium.
+ The driver supports common 802.11 encryption/authentication
+ methods including Open, WPA, WPA2-Personal and
+ WPA2-Enterprise (802.1X).
+
+ To compile this driver as a module, choose M here. The module will
+ be called plfxlc.
diff --git a/drivers/net/wireless/purelifi/plfxlc/Makefile b/drivers/net/wireless/purelifi/plfxlc/Makefile
new file mode 100644
index 000000000000..7ed954e871d6
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PLFXLC) := plfxlc.o
+plfxlc-objs += chip.o firmware.o usb.o mac.o
diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.c b/drivers/net/wireless/purelifi/plfxlc/chip.c
new file mode 100644
index 000000000000..f4ef9ff97146
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/chip.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "chip.h"
+#include "mac.h"
+#include "usb.h"
+
+void plfxlc_chip_init(struct plfxlc_chip *chip,
+ struct ieee80211_hw *hw,
+ struct usb_interface *intf)
+{
+ memset(chip, 0, sizeof(*chip));
+ mutex_init(&chip->mutex);
+ plfxlc_usb_init(&chip->usb, hw, intf);
+}
+
+void plfxlc_chip_release(struct plfxlc_chip *chip)
+{
+ plfxlc_usb_release(&chip->usb);
+ mutex_destroy(&chip->mutex);
+}
+
+int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval,
+ u8 dtim_period, int type)
+{
+ if (!interval ||
+ (chip->beacon_set && chip->beacon_interval == interval))
+ return 0;
+
+ chip->beacon_interval = interval;
+ chip->beacon_set = true;
+ return plfxlc_usb_wreq(chip->usb.ez_usb,
+ &chip->beacon_interval,
+ sizeof(chip->beacon_interval),
+ USB_REQ_BEACON_INTERVAL_WR);
+}
+
+int plfxlc_chip_init_hw(struct plfxlc_chip *chip)
+{
+ unsigned char *addr = plfxlc_mac_get_perm_addr(plfxlc_chip_to_mac(chip));
+ struct usb_device *udev = interface_to_usbdev(chip->usb.intf);
+
+ pr_info("plfxlc chip %04x:%04x v%02x %pM %s\n",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct),
+ le16_to_cpu(udev->descriptor.bcdDevice),
+ addr,
+ plfxlc_speed(udev->speed));
+
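+ /* Program a default beacon interval of 100 at init. */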
+ return plfxlc_set_beacon_interval(chip, 100, 0, 0);
+}
+
+int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value)
+{
+ int r;
+ __le16 radio_on = cpu_to_le16(value);
+
+ r = plfxlc_usb_wreq(chip->usb.ez_usb, &radio_on,
+ sizeof(value), USB_REQ_POWER_WR);
+ if (r)
+ dev_err(plfxlc_chip_dev(chip), "POWER_WR failed (%d)\n", r);
+ return r;
+}
+
+int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip)
+{
+ plfxlc_usb_enable_tx(&chip->usb);
+ return plfxlc_usb_enable_rx(&chip->usb);
+}
+
+void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip)
+{
+ u8 value = 0;
+
+ plfxlc_usb_wreq(chip->usb.ez_usb,
+ &value, sizeof(value), USB_REQ_RXTX_WR);
+ plfxlc_usb_disable_rx(&chip->usb);
+ plfxlc_usb_disable_tx(&chip->usb);
+}
+
+int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate)
+{
+ int r;
+
+ if (!chip)
+ return -EINVAL;
+
+ r = plfxlc_usb_wreq(chip->usb.ez_usb,
+ &rate, sizeof(rate), USB_REQ_RATE_WR);
+ if (r)
+ dev_err(plfxlc_chip_dev(chip), "RATE_WR failed (%d)\n", r);
+ return r;
+}
diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.h b/drivers/net/wireless/purelifi/plfxlc/chip.h
new file mode 100644
index 000000000000..dc04bbed07ac
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/chip.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#ifndef PLFXLC_CHIP_H
+#define PLFXLC_CHIP_H
+
+#include <net/mac80211.h>
+
+#include "usb.h"
+
+enum unit_type {
+ STA = 0,
+ AP = 1,
+};
+
+enum {
+ PLFXLC_RADIO_OFF = 0,
+ PLFXLC_RADIO_ON = 1,
+};
+
+struct plfxlc_chip {
+ struct plfxlc_usb usb;
+ struct mutex mutex; /* lock to protect chip data */
+ enum unit_type unit_type;
+ u16 link_led;
+ u8 beacon_set;
+ u16 beacon_interval;
+};
+
+struct plfxlc_mc_hash {
+ u32 low;
+ u32 high;
+};
+
+#define plfxlc_chip_dev(chip) (&(chip)->usb.intf->dev)
+
+void plfxlc_chip_init(struct plfxlc_chip *chip,
+ struct ieee80211_hw *hw,
+ struct usb_interface *intf);
+
+void plfxlc_chip_release(struct plfxlc_chip *chip);
+
+void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip);
+
+int plfxlc_chip_init_hw(struct plfxlc_chip *chip);
+
+int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip);
+
+int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate);
+
+int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval,
+ u8 dtim_period, int type);
+
+int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value);
+
+static inline struct plfxlc_chip *plfxlc_usb_to_chip(struct plfxlc_usb
+ *usb)
+{
+ return container_of(usb, struct plfxlc_chip, usb);
+}
+
+static inline void plfxlc_mc_add_all(struct plfxlc_mc_hash *hash)
+{
+ hash->low = 0xffffffff;
+ hash->high = 0xffffffff;
+}
+
+#endif /* PLFXLC_CHIP_H */
diff --git a/drivers/net/wireless/purelifi/plfxlc/firmware.c b/drivers/net/wireless/purelifi/plfxlc/firmware.c
new file mode 100644
index 000000000000..8a3529d07927
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/firmware.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/firmware.h>
+#include <linux/bitrev.h>
+
+#include "mac.h"
+#include "usb.h"
+
+static int send_vendor_request(struct usb_device *udev, int request,
+ unsigned char *buffer, int buffer_size)
+{
+ return usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ request, 0xC0, 0, 0,
+ buffer, buffer_size, PLF_USB_TIMEOUT);
+}
+
+static int send_vendor_command(struct usb_device *udev, int request,
+ unsigned char *buffer, int buffer_size)
+{
+ return usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ request, USB_TYPE_VENDOR /*0x40*/, 0, 0,
+ buffer, buffer_size, PLF_USB_TIMEOUT);
+}
+
+int plfxlc_download_fpga(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned char *fpga_dmabuff = NULL;
+ const struct firmware *fw = NULL;
+ int blk_tran_len = PLF_BULK_TLEN;
+ unsigned char *fw_data;
+ const char *fw_name;
+ int r, actual_length;
+ int fw_data_i = 0;
+
+ if ((le16_to_cpu(udev->descriptor.idVendor) ==
+ PURELIFI_X_VENDOR_ID_0) &&
+ (le16_to_cpu(udev->descriptor.idProduct) ==
+ PURELIFI_X_PRODUCT_ID_0)) {
+ fw_name = "plfxlc/lifi-x.bin";
+ dev_dbg(&intf->dev, "bin file for X selected\n");
+
+ } else if ((le16_to_cpu(udev->descriptor.idVendor)) ==
+ PURELIFI_XC_VENDOR_ID_0 &&
+ (le16_to_cpu(udev->descriptor.idProduct) ==
+ PURELIFI_XC_PRODUCT_ID_0)) {
+ fw_name = "plfxlc/lifi-xc.bin";
+ dev_dbg(&intf->dev, "bin file for XC selected\n");
+
+ } else {
+ r = -EINVAL;
+ goto error;
+ }
+
+ r = request_firmware(&fw, fw_name, &intf->dev);
+ if (r) {
+ dev_err(&intf->dev, "request_firmware failed (%d)\n", r);
+ goto error;
+ }
+ fpga_dmabuff = kmalloc(PLF_FPGA_STATUS_LEN, GFP_KERNEL);
+
+ if (!fpga_dmabuff) {
+ r = -ENOMEM;
+ goto error_free_fw;
+ }
+ send_vendor_request(udev, PLF_VNDR_FPGA_SET_REQ,
+ fpga_dmabuff, PLF_FPGA_STATUS_LEN);
+
+ send_vendor_command(udev, PLF_VNDR_FPGA_SET_CMD, NULL, 0);
+
+ if (fpga_dmabuff[0] != PLF_FPGA_MG) {
+ dev_err(&intf->dev, "fpga_dmabuff[0] is wrong\n");
+ r = -EINVAL;
+ goto error_free_fw;
+ }
+
+ for (fw_data_i = 0; fw_data_i < fw->size;) {
+ int tbuf_idx;
+
+ if ((fw->size - fw_data_i) < blk_tran_len)
+ blk_tran_len = fw->size - fw_data_i;
+
+ fw_data = kmemdup(&fw->data[fw_data_i], blk_tran_len,
+ GFP_KERNEL);
+ if (!fw_data) {
+ r = -ENOMEM;
+ goto error_free_fw;
+ }
+
+ for (tbuf_idx = 0; tbuf_idx < blk_tran_len; tbuf_idx++) {
+ /* u8 bit reverse */
+ fw_data[tbuf_idx] = bitrev8(fw_data[tbuf_idx]);
+ }
+ r = usb_bulk_msg(udev,
+ usb_sndbulkpipe(interface_to_usbdev(intf),
+ fpga_dmabuff[0] & 0xff),
+ fw_data,
+ blk_tran_len,
+ &actual_length,
+ 2 * PLF_USB_TIMEOUT);
+
+ if (r)
+ dev_err(&intf->dev, "Bulk msg failed (%d)\n", r);
+
+ kfree(fw_data);
+ fw_data_i += blk_tran_len;
+ }
+
+ kfree(fpga_dmabuff);
+ fpga_dmabuff = kmalloc(PLF_FPGA_STATE_LEN, GFP_KERNEL);
+ if (!fpga_dmabuff) {
+ r = -ENOMEM;
+ goto error_free_fw;
+ }
+ memset(fpga_dmabuff, 0xff, PLF_FPGA_STATE_LEN);
+
+ send_vendor_request(udev, PLF_VNDR_FPGA_STATE_REQ, fpga_dmabuff,
+ PLF_FPGA_STATE_LEN);
+
+ dev_dbg(&intf->dev, "%*ph\n", 8, fpga_dmabuff);
+
+ if (fpga_dmabuff[0] != 0) {
+ r = -EINVAL;
+ goto error_free_fw;
+ }
+
+ send_vendor_command(udev, PLF_VNDR_FPGA_STATE_CMD, NULL, 0);
+
+ msleep(PLF_MSLEEP_TIME);
+
+error_free_fw:
+ kfree(fpga_dmabuff);
+ release_firmware(fw);
+error:
+ return r;
+}
+
+int plfxlc_download_xl_firmware(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ const struct firmware *fwp = NULL;
+ struct plfxlc_firmware_file file = {0};
+ const char *fw_pack;
+ int s, r;
+ u8 *buf;
+ u32 i;
+
+ r = send_vendor_command(udev, PLF_VNDR_XL_FW_CMD, NULL, 0);
+ msleep(PLF_MSLEEP_TIME);
+
+ if (r) {
+ dev_err(&intf->dev, "vendor command failed (%d)\n", r);
+ return -EINVAL;
+ }
+ /* Code for single pack file download */
+
+ fw_pack = "plfxlc/lifi-xl.bin";
+
+ r = request_firmware(&fwp, fw_pack, &intf->dev);
+ if (r) {
+ dev_err(&intf->dev, "Request_firmware failed (%d)\n", r);
+ return -EINVAL;
+ }
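+ /* The pack header is a le32 file count followed by one le32 start
+ * offset per file; each file's size is the gap to the next offset
+ * (or to the end of the pack for the last file).
+ */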
+ file.total_files = get_unaligned_le32(&fwp->data[0]);
+ file.total_size = get_unaligned_le32(&fwp->size);
+
+ dev_dbg(&intf->dev, "XL Firmware (%d, %d)\n",
+ file.total_files, file.total_size);
+
+ buf = kzalloc(PLF_XL_BUF_LEN, GFP_KERNEL);
+ if (!buf) {
+ release_firmware(fwp);
+ return -ENOMEM;
+ }
+
+ if (file.total_files > 10) {
+ dev_err(&intf->dev, "Too many files (%d)\n", file.total_files);
+ release_firmware(fwp);
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ /* Download firmware files in multiple steps */
+ for (s = 0; s < file.total_files; s++) {
+ buf[0] = s;
+ r = send_vendor_command(udev, PLF_VNDR_XL_FILE_CMD, buf,
+ PLF_XL_BUF_LEN);
+
+ if (s < file.total_files - 1)
+ file.size = get_unaligned_le32(&fwp->data[4 + ((s + 1) * 4)])
+ - get_unaligned_le32(&fwp->data[4 + (s) * 4]);
+ else
+ file.size = file.total_size -
+ get_unaligned_le32(&fwp->data[4 + (s) * 4]);
+
+ if (file.size > file.total_size || file.size > 60000) {
+ dev_err(&intf->dev, "File size is too large (%d)\n", file.size);
+ break;
+ }
+
+ file.start_addr = get_unaligned_le32(&fwp->data[4 + (s * 4)]);
+
+ if (file.size % PLF_XL_BUF_LEN && s < 2)
+ file.size += PLF_XL_BUF_LEN - file.size % PLF_XL_BUF_LEN;
+
+ file.control_packets = file.size / PLF_XL_BUF_LEN;
+
+ for (i = 0; i < file.control_packets; i++) {
+ memcpy(buf,
+ &fwp->data[file.start_addr + (i * PLF_XL_BUF_LEN)],
+ PLF_XL_BUF_LEN);
+ r = send_vendor_command(udev, PLF_VNDR_XL_DATA_CMD, buf,
+ PLF_XL_BUF_LEN);
+ }
+ dev_dbg(&intf->dev, "fw-dw step=%d,r=%d size=%d\n", s, r,
+ file.size);
+ }
+ release_firmware(fwp);
+ kfree(buf);
+
+ /* Single pack file download ends; firmware download is complete. */
+
+ r = send_vendor_command(udev, PLF_VNDR_XL_EX_CMD, NULL, 0);
+ dev_dbg(&intf->dev, "Download fpga (4) (%d)\n", r);
+
+ return 0;
+}
+
+int plfxlc_upload_mac_and_serial(struct usb_interface *intf,
+ unsigned char *hw_address,
+ unsigned char *serial_number)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long long firmware_version;
+ unsigned char *dma_buffer = NULL;
+
+ dma_buffer = kmalloc(PLF_SERIAL_LEN, GFP_KERNEL);
+ if (!dma_buffer)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(ETH_ALEN > PLF_SERIAL_LEN);
+ BUILD_BUG_ON(PLF_FW_VER_LEN > PLF_SERIAL_LEN);
+
+ send_vendor_request(udev, PLF_MAC_VENDOR_REQUEST, dma_buffer,
+ ETH_ALEN);
+
+ memcpy(hw_address, dma_buffer, ETH_ALEN);
+
+ send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST,
+ dma_buffer, PLF_SERIAL_LEN);
+
+ send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST,
+ dma_buffer, PLF_SERIAL_LEN);
+
+ memcpy(serial_number, dma_buffer, PLF_SERIAL_LEN);
+
+ memset(dma_buffer, 0x00, PLF_SERIAL_LEN);
+
+ send_vendor_request(udev, PLF_FIRMWARE_VERSION_VENDOR_REQUEST,
+ (unsigned char *)dma_buffer, PLF_FW_VER_LEN);
+
+ memcpy(&firmware_version, dma_buffer, PLF_FW_VER_LEN);
+
+ dev_info(&intf->dev, "Firmware Version: %llu\n", firmware_version);
+ kfree(dma_buffer);
+
+ dev_dbg(&intf->dev, "Mac: %pM\n", hw_address);
+
+ return 0;
+}
+
diff --git a/drivers/net/wireless/purelifi/plfxlc/intf.h b/drivers/net/wireless/purelifi/plfxlc/intf.h
new file mode 100644
index 000000000000..5ae89343b579
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/intf.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#define PURELIFI_BYTE_NUM_ALIGNMENT 4
+#define ETH_ALEN 6
+#define AP_USER_LIMIT 8
+
+#define PLF_VNDR_FPGA_STATE_REQ 0x30
+#define PLF_VNDR_FPGA_SET_REQ 0x33
+#define PLF_VNDR_FPGA_SET_CMD 0x34
+#define PLF_VNDR_FPGA_STATE_CMD 0x35
+
+#define PLF_VNDR_XL_FW_CMD 0x80
+#define PLF_VNDR_XL_DATA_CMD 0x81
+#define PLF_VNDR_XL_FILE_CMD 0x82
+#define PLF_VNDR_XL_EX_CMD 0x83
+
+#define PLF_MAC_VENDOR_REQUEST 0x36
+#define PLF_SERIAL_NUMBER_VENDOR_REQUEST 0x37
+#define PLF_FIRMWARE_VERSION_VENDOR_REQUEST 0x39
+#define PLF_SERIAL_LEN 14
+#define PLF_FW_VER_LEN 8
+
+struct rx_status {
+ __be16 rssi;
+ u8 rate_idx;
+ u8 pad;
+ __be64 crc_error_count;
+} __packed;
+
+enum plf_usb_req_enum {
+ USB_REQ_TEST_WR = 0,
+ USB_REQ_MAC_WR = 1,
+ USB_REQ_POWER_WR = 2,
+ USB_REQ_RXTX_WR = 3,
+ USB_REQ_BEACON_WR = 4,
+ USB_REQ_BEACON_INTERVAL_WR = 5,
+ USB_REQ_RTS_CTS_RATE_WR = 6,
+ USB_REQ_HASH_WR = 7,
+ USB_REQ_DATA_TX = 8,
+ USB_REQ_RATE_WR = 9,
+ USB_REQ_SET_FREQ = 15
+};
+
+struct plf_usb_req {
+ __be32 id; /* should be plf_usb_req_enum */
+ __be32 len;
+ u8 buf[512];
+};
+
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
new file mode 100644
index 000000000000..90e552532701
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/gpio.h>
+#include <linux/jiffies.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "chip.h"
+#include "mac.h"
+#include "usb.h"
+
+static const struct ieee80211_rate plfxlc_rates[] = {
+ { .bitrate = 10,
+ .hw_value = PURELIFI_CCK_RATE_1M,
+ .flags = 0 },
+ { .bitrate = 20,
+ .hw_value = PURELIFI_CCK_RATE_2M,
+ .hw_value_short = PURELIFI_CCK_RATE_2M
+ | PURELIFI_CCK_PREA_SHORT,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = PURELIFI_CCK_RATE_5_5M,
+ .hw_value_short = PURELIFI_CCK_RATE_5_5M
+ | PURELIFI_CCK_PREA_SHORT,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = PURELIFI_CCK_RATE_11M,
+ .hw_value_short = PURELIFI_CCK_RATE_11M
+ | PURELIFI_CCK_PREA_SHORT,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 60,
+ .hw_value = PURELIFI_OFDM_RATE_6M,
+ .flags = 0 },
+ { .bitrate = 90,
+ .hw_value = PURELIFI_OFDM_RATE_9M,
+ .flags = 0 },
+ { .bitrate = 120,
+ .hw_value = PURELIFI_OFDM_RATE_12M,
+ .flags = 0 },
+ { .bitrate = 180,
+ .hw_value = PURELIFI_OFDM_RATE_18M,
+ .flags = 0 },
+ { .bitrate = 240,
+ .hw_value = PURELIFI_OFDM_RATE_24M,
+ .flags = 0 },
+ { .bitrate = 360,
+ .hw_value = PURELIFI_OFDM_RATE_36M,
+ .flags = 0 },
+ { .bitrate = 480,
+ .hw_value = PURELIFI_OFDM_RATE_48M,
+ .flags = 0 },
+ { .bitrate = 540,
+ .hw_value = PURELIFI_OFDM_RATE_54M,
+ .flags = 0 }
+};
+
+static const struct ieee80211_channel plfxlc_channels[] = {
+ { .center_freq = 2412, .hw_value = 1 },
+ { .center_freq = 2417, .hw_value = 2 },
+ { .center_freq = 2422, .hw_value = 3 },
+ { .center_freq = 2427, .hw_value = 4 },
+ { .center_freq = 2432, .hw_value = 5 },
+ { .center_freq = 2437, .hw_value = 6 },
+ { .center_freq = 2442, .hw_value = 7 },
+ { .center_freq = 2447, .hw_value = 8 },
+ { .center_freq = 2452, .hw_value = 9 },
+ { .center_freq = 2457, .hw_value = 10 },
+ { .center_freq = 2462, .hw_value = 11 },
+ { .center_freq = 2467, .hw_value = 12 },
+ { .center_freq = 2472, .hw_value = 13 },
+ { .center_freq = 2484, .hw_value = 14 },
+};
+
+int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address)
+{
+ SET_IEEE80211_PERM_ADDR(hw, hw_address);
+ return 0;
+}
+
+int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct plfxlc_chip *chip = &mac->chip;
+ int r;
+
+ r = plfxlc_chip_init_hw(chip);
+ if (r) {
+ dev_warn(plfxlc_mac_dev(mac), "init hw failed (%d)\n", r);
+ return r;
+ }
+
+ dev_dbg(plfxlc_mac_dev(mac), "irq_disabled (%d)\n", irqs_disabled());
+ regulatory_hint(hw->wiphy, "00");
+ return r;
+}
+
+void plfxlc_mac_release(struct plfxlc_mac *mac)
+{
+ plfxlc_chip_release(&mac->chip);
+ lockdep_assert_held(&mac->lock);
+}
+
+int plfxlc_op_start(struct ieee80211_hw *hw)
+{
+ plfxlc_hw_mac(hw)->chip.usb.initialized = 1;
+ return 0;
+}
+
+void plfxlc_op_stop(struct ieee80211_hw *hw)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+
+ clear_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+}
+
+int plfxlc_restore_settings(struct plfxlc_mac *mac)
+{
+ int beacon_interval, beacon_period;
+ struct sk_buff *beacon;
+
+ spin_lock_irq(&mac->lock);
+ beacon_interval = mac->beacon.interval;
+ beacon_period = mac->beacon.period;
+ spin_unlock_irq(&mac->lock);
+
+ if (mac->type != NL80211_IFTYPE_ADHOC)
+ return 0;
+
+ if (mac->vif) {
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ /* Beacon is hardcoded in the firmware */
+ kfree_skb(beacon);
+ /* The returned skb is used only once and the low-level
+ * driver is responsible for freeing it.
+ */
+ }
+ }
+
+ plfxlc_set_beacon_interval(&mac->chip, beacon_interval,
+ beacon_period, mac->type);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+
+ return 0;
+}
+
+static void plfxlc_mac_tx_status(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ int ackssi,
+ struct tx_status *tx_status)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int success = 1;
+
+ ieee80211_tx_info_clear_status(info);
+ if (tx_status)
+ success = !tx_status->failure;
+
+ if (success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ info->status.ack_signal = 50;
+ ieee80211_tx_status_irqsafe(hw, skb);
+}
+
+void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = info->rate_driver_data[0];
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct sk_buff_head *q = NULL;
+
+ ieee80211_tx_info_clear_status(info);
+ skb_pull(skb, sizeof(struct plfxlc_ctrlset));
+
+ if (unlikely(error ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
+ ieee80211_tx_status_irqsafe(hw, skb);
+ return;
+ }
+
+ q = &mac->ack_wait_queue;
+
+ skb_queue_tail(q, skb);
+ while (skb_queue_len(q)/* > PURELIFI_MAC_MAX_ACK_WAITERS*/) {
+ plfxlc_mac_tx_status(hw, skb_dequeue(q),
+ mac->ack_pending ?
+ mac->ack_signal : 0,
+ NULL);
+ mac->ack_pending = 0;
+ }
+}
+
+static int plfxlc_fill_ctrlset(struct plfxlc_mac *mac, struct sk_buff *skb)
+{
+ unsigned int frag_len = skb->len;
+ struct plfxlc_ctrlset *cs;
+ u32 temp_payload_len = 0;
+ unsigned int tmp;
+ u32 temp_len = 0;
+
+ if (skb_headroom(skb) < sizeof(struct plfxlc_ctrlset)) {
+ dev_dbg(plfxlc_mac_dev(mac), "Not enough hroom(1)\n");
+ return 1;
+ }
+
+ cs = (void *)skb_push(skb, sizeof(struct plfxlc_ctrlset));
+ temp_payload_len = frag_len;
+ temp_len = temp_payload_len +
+ sizeof(struct plfxlc_ctrlset) -
+ sizeof(cs->id) - sizeof(cs->len);
+
+ /* Data packet lengths must be a multiple of four bytes and must
+ * not be a multiple of 512 bytes. Padding is first appended in the
+ * tailroom of the skb. On rare occasions the tailroom is too small;
+ * in that case the packet content is shifted into the headroom of
+ * the skb with memmove. Headroom is allocated at startup (below in
+ * this file), so there is always enough of it; the skb_headroom()
+ * check is an additional safety net which might be dropped.
+ */
+ /* check if 32 bit aligned and align data */
+ tmp = skb->len & 3;
+ if (tmp) {
+ if (skb_tailroom(skb) < (3 - tmp)) {
+ if (skb_headroom(skb) >= 4 - tmp) {
+ u8 len;
+ u8 *src_pt;
+ u8 *dest_pt;
+
+ len = skb->len;
+ src_pt = skb->data;
+ dest_pt = skb_push(skb, 4 - tmp);
+ memmove(dest_pt, src_pt, len);
+ } else {
+ return -ENOBUFS;
+ }
+ } else {
+ skb_put(skb, 4 - tmp);
+ }
+ temp_len += 4 - tmp;
+ }
+
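+ /* A transfer that is an exact multiple of the 512-byte bulk packet
+ * size would presumably need a zero-length packet to terminate, so
+ * such packets are padded by four extra bytes.
+ */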
+ /* check if not multiple of 512 and align data */
+ tmp = skb->len & 0x1ff;
+ if (!tmp) {
+ if (skb_tailroom(skb) < 4) {
+ if (skb_headroom(skb) >= 4) {
+ u8 len = skb->len;
+ u8 *src_pt = skb->data;
+ u8 *dest_pt = skb_push(skb, 4);
+
+ memmove(dest_pt, src_pt, len);
+ } else {
+ /* should never happen because
+ * sufficient headroom was reserved
+ */
+ return -ENOBUFS;
+ }
+ } else {
+ skb_put(skb, 4);
+ }
+ temp_len += 4;
+ }
+
+ cs->id = cpu_to_be32(USB_REQ_DATA_TX);
+ cs->len = cpu_to_be32(temp_len);
+ cs->payload_len_nw = cpu_to_be32(temp_payload_len);
+
+ return 0;
+}
+
+static void plfxlc_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct plfxlc_header *plhdr = (void *)skb->data;
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct plfxlc_usb *usb = &mac->chip.usb;
+ unsigned long flags;
+ int r;
+
+ r = plfxlc_fill_ctrlset(mac, skb);
+ if (r)
+ goto fail;
+
+ info->rate_driver_data[0] = hw;
+
+ if (plhdr->frametype == IEEE80211_FTYPE_DATA) {
+ u8 *dst_mac = plhdr->dmac;
+ u8 sidx;
+ bool found = false;
+ struct plfxlc_usb_tx *tx = &usb->tx;
+
+ for (sidx = 0; sidx < MAX_STA_NUM; sidx++) {
+ if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG))
+ continue;
+ if (memcmp(tx->station[sidx].mac, dst_mac, ETH_ALEN))
+ continue;
+ found = true;
+ break;
+ }
+
+ /* Default to broadcast address for unknown MACs */
+ if (!found)
+ sidx = STA_BROADCAST_INDEX;
+
+ /* Stop the OS from sending packets if the queue is half full */
+ if (skb_queue_len(&tx->station[sidx].data_list) > 60)
+ ieee80211_stop_queues(plfxlc_usb_to_hw(usb));
+
+ /* Schedule packet for transmission if queue is not full */
+ if (skb_queue_len(&tx->station[sidx].data_list) > 256)
+ goto fail;
+ skb_queue_tail(&tx->station[sidx].data_list, skb);
+ plfxlc_send_packet_from_data_queue(usb);
+
+ } else {
+ spin_lock_irqsave(&usb->tx.lock, flags);
+ r = plfxlc_usb_wreq_async(&mac->chip.usb, skb->data, skb->len,
+ USB_REQ_DATA_TX, plfxlc_tx_urb_complete, skb);
+ spin_unlock_irqrestore(&usb->tx.lock, flags);
+ if (r)
+ goto fail;
+ }
+ return;
+
+fail:
+ dev_kfree_skb(skb);
+}
+
+static int plfxlc_filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
+ struct ieee80211_rx_status *stats)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct sk_buff_head *q;
+ int i, position = 0;
+ unsigned long flags;
+ struct sk_buff *skb;
+ bool found = false;
+
+ if (!ieee80211_is_ack(rx_hdr->frame_control))
+ return 0;
+
+ dev_dbg(plfxlc_mac_dev(mac), "ACK Received\n");
+
+ /* Code based on the zy driver; this logic may need a fix. */
+ q = &mac->ack_wait_queue;
+ spin_lock_irqsave(&q->lock, flags);
+
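+ /* Match the ACK receiver address against the transmitter address
+ * of the frames waiting in the ack queue.
+ */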
+ skb_queue_walk(q, skb) {
+ struct ieee80211_hdr *tx_hdr;
+
+ position++;
+
+ if (mac->ack_pending && skb_queue_is_first(q, skb))
+ continue;
+ if (mac->ack_pending == 0)
+ break;
+
+ tx_hdr = (struct ieee80211_hdr *)skb->data;
+ if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1))) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ for (i = 1; i < position; i++)
+ skb = __skb_dequeue(q);
+ if (i == position) {
+ plfxlc_mac_tx_status(hw, skb,
+ mac->ack_pending ?
+ mac->ack_signal : 0,
+ NULL);
+ mac->ack_pending = 0;
+ }
+
+ mac->ack_pending = skb_queue_len(q) ? 1 : 0;
+ mac->ack_signal = stats->signal;
+ }
+
+ spin_unlock_irqrestore(&q->lock, flags);
+ return 1;
+}
+
+int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer,
+ unsigned int length)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct ieee80211_rx_status stats;
+ const struct rx_status *status;
+ unsigned int payload_length;
+ struct plfxlc_usb_tx *tx;
+ struct sk_buff *skb;
+ int need_padding;
+ __le16 fc;
+ int sidx;
+
+ /* Drop incoming packets while the interface is disabled. */
+ if (!mac->vif)
+ return 0;
+
+ status = (struct rx_status *)buffer;
+
+ memset(&stats, 0, sizeof(stats));
+
+ stats.flag = 0;
+ stats.freq = 2412;
+ stats.band = NL80211_BAND_LC;
+ mac->rssi = -15 * be16_to_cpu(status->rssi) / 10;
+
+ stats.signal = mac->rssi;
+
+ if (status->rate_idx > 7)
+ stats.rate_idx = 0;
+ else
+ stats.rate_idx = status->rate_idx;
+
+ mac->crc_errors = be64_to_cpu(status->crc_error_count);
+
+ /* TODO: add a bad-frame check for CRC errors */
+ if (plfxlc_filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats) &&
+ !mac->pass_ctrl)
+ return 0;
+
+ buffer += sizeof(struct rx_status);
+ payload_length = get_unaligned_be32(buffer);
+
+ if (payload_length > 1560) {
+ dev_err(plfxlc_mac_dev(mac), " > MTU %u\n", payload_length);
+ return 0;
+ }
+ buffer += sizeof(u32);
+
+ fc = get_unaligned((__le16 *)buffer);
+ need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);
+
+ tx = &mac->chip.usb.tx;
+
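+ /* buffer[10] is addr2 (the transmitter): refresh the heartbeat of a
+ * known station, or learn the address in a free slot otherwise.
+ */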
+ for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) {
+ if (memcmp(&buffer[10], tx->station[sidx].mac, ETH_ALEN))
+ continue;
+ if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) {
+ tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG;
+ break;
+ }
+ }
+
+ if (sidx == MAX_STA_NUM - 1) {
+ for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) {
+ if (tx->station[sidx].flag & STATION_CONNECTED_FLAG)
+ continue;
+ memcpy(tx->station[sidx].mac, &buffer[10], ETH_ALEN);
+ tx->station[sidx].flag |= STATION_CONNECTED_FLAG;
+ tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG;
+ break;
+ }
+ }
+
+ switch (buffer[0]) {
+ case IEEE80211_STYPE_PROBE_REQ:
+ dev_dbg(plfxlc_mac_dev(mac), "Probe request\n");
+ break;
+ case IEEE80211_STYPE_ASSOC_REQ:
+ dev_dbg(plfxlc_mac_dev(mac), "Association request\n");
+ break;
+ case IEEE80211_STYPE_AUTH:
+ dev_dbg(plfxlc_mac_dev(mac), "Authentication req\n");
+ break;
+ case IEEE80211_FTYPE_DATA:
+ dev_dbg(plfxlc_mac_dev(mac), "802.11 data frame\n");
+ break;
+ }
+
+ skb = dev_alloc_skb(payload_length + (need_padding ? 2 : 0));
+ if (!skb)
+ return -ENOMEM;
+
+ if (need_padding)
+ /* Make sure that the payload data is 4 byte aligned. */
+ skb_reserve(skb, 2);
+
+ skb_put_data(skb, buffer, payload_length);
+ memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
+ ieee80211_rx_irqsafe(hw, skb);
+ return 0;
+}
+
+static int plfxlc_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ static const char * const iftype80211[] = {
+ [NL80211_IFTYPE_STATION] = "Station",
+ [NL80211_IFTYPE_ADHOC] = "Adhoc"
+ };
+
+ if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
+ return -EOPNOTSUPP;
+
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_STATION) {
+ dev_dbg(plfxlc_mac_dev(mac), "%s %s\n", __func__,
+ iftype80211[vif->type]);
+ mac->type = vif->type;
+ mac->vif = vif;
+ return 0;
+ }
+ dev_dbg(plfxlc_mac_dev(mac), "unsupported iftype\n");
+ return -EOPNOTSUPP;
+}
+
+static void plfxlc_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+
+ mac->type = NL80211_IFTYPE_UNSPECIFIED;
+ mac->vif = NULL;
+}
+
+static int plfxlc_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+#define SUPPORTED_FIF_FLAGS \
+ (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
+ FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
+static void plfxlc_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags,
+ u64 multicast)
+{
+ struct plfxlc_mc_hash hash = {
+ .low = multicast,
+ .high = multicast >> 32,
+ };
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ unsigned long flags;
+
+ /* Only deal with supported flags */
+ *new_flags &= SUPPORTED_FIF_FLAGS;
+
+ /* If the multicast parameter
+ * (as returned by plfxlc_op_prepare_multicast)
+ * has changed, no bit in changed_flags is set. To handle this
+ * situation we do not return early when changed_flags is 0;
+ * doing so would break IPv6, which relies on multicast for
+ * link-layer address resolution.
+ */
+ if (*new_flags & (FIF_ALLMULTI))
+ plfxlc_mc_add_all(&hash);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL);
+ mac->pass_ctrl = !!(*new_flags & FIF_CONTROL);
+ mac->multicast_hash = hash;
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ /* no handling required for FIF_OTHER_BSS as we don't currently
+ * do BSSID filtering
+ */
+ /* FIXME: in future it would be nice to enable the probe response
+ * filter (so that the driver doesn't see them) until
+ * FIF_BCN_PRBRESP_PROMISC is set. However, due to atomicity here, we'd
+ * have to schedule work to enable prbresp reception, which might
+ * happen too late. For now we'll just listen and forward them all the
+ * time.
+ */
+}
+
+static void plfxlc_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ int associated;
+
+ dev_dbg(plfxlc_mac_dev(mac), "changes: %x\n", changes);
+
+ if (mac->type != NL80211_IFTYPE_ADHOC) { /* for STATION */
+ associated = is_valid_ether_addr(bss_conf->bssid);
+ goto exit_all;
+ }
+ /* for ADHOC */
+ associated = true;
+ if (changes & BSS_CHANGED_BEACON) {
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+
+ if (beacon) {
+ /* Beacon is hardcoded in the firmware */
+ kfree_skb(beacon);
+ /* The returned skb is used only once and the
+ * low-level driver is responsible for freeing it.
+ */
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ u16 interval = 0;
+ u8 period = 0;
+
+ if (bss_conf->enable_beacon) {
+ period = bss_conf->dtim_period;
+ interval = bss_conf->beacon_int;
+ }
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.period = period;
+ mac->beacon.interval = interval;
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+
+ plfxlc_set_beacon_interval(&mac->chip, interval,
+ period, mac->type);
+ }
+exit_all:
+ spin_lock_irq(&mac->lock);
+ mac->associated = associated;
+ spin_unlock_irq(&mac->lock);
+}
+
+static int plfxlc_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ stats->dot11ACKFailureCount = 0;
+ stats->dot11RTSFailureCount = 0;
+ stats->dot11FCSErrorCount = 0;
+ stats->dot11RTSSuccessCount = 0;
+ return 0;
+}
+
+static const char et_strings[][ETH_GSTRING_LEN] = {
+ "phy_rssi",
+ "phy_rx_crc_err"
+};
+
+static int plfxlc_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(et_strings);
+
+ return 0;
+}
+
+static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *et_strings, sizeof(et_strings));
+}
+
+static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+
+ data[0] = mac->rssi;
+ data[1] = mac->crc_errors;
+}
+
+static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static const struct ieee80211_ops plfxlc_ops = {
+ .tx = plfxlc_op_tx,
+ .start = plfxlc_op_start,
+ .stop = plfxlc_op_stop,
+ .add_interface = plfxlc_op_add_interface,
+ .remove_interface = plfxlc_op_remove_interface,
+ .set_rts_threshold = plfxlc_set_rts_threshold,
+ .config = plfxlc_op_config,
+ .configure_filter = plfxlc_op_configure_filter,
+ .bss_info_changed = plfxlc_op_bss_info_changed,
+ .get_stats = plfxlc_get_stats,
+ .get_et_sset_count = plfxlc_get_et_sset_count,
+ .get_et_stats = plfxlc_get_et_stats,
+ .get_et_strings = plfxlc_get_et_strings,
+};
+
+struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw;
+ struct plfxlc_mac *mac;
+
+ hw = ieee80211_alloc_hw(sizeof(struct plfxlc_mac), &plfxlc_ops);
+ if (!hw) {
+ dev_dbg(&intf->dev, "out of memory\n");
+ return NULL;
+ }
+ set_wiphy_dev(hw->wiphy, &intf->dev);
+
+ mac = plfxlc_hw_mac(hw);
+ memset(mac, 0, sizeof(*mac));
+ spin_lock_init(&mac->lock);
+ mac->hw = hw;
+
+ mac->type = NL80211_IFTYPE_UNSPECIFIED;
+
+ memcpy(mac->channels, plfxlc_channels, sizeof(plfxlc_channels));
+ memcpy(mac->rates, plfxlc_rates, sizeof(plfxlc_rates));
+ mac->band.n_bitrates = ARRAY_SIZE(plfxlc_rates);
+ mac->band.bitrates = mac->rates;
+ mac->band.n_channels = ARRAY_SIZE(plfxlc_channels);
+ mac->band.channels = mac->channels;
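+ /* All channels and rates are registered on the light communication
+ * band.
+ */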
+ hw->wiphy->bands[NL80211_BAND_LC] = &mac->band;
+ hw->conf.chandef.width = NL80211_CHAN_WIDTH_20;
+
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+ hw->max_signal = 100;
+ hw->queues = 1;
+ /* 4 for 32 bit alignment if no tailroom */
+ hw->extra_tx_headroom = sizeof(struct plfxlc_ctrlset) + 4;
+ /* Tell mac80211 that we support multi rate retries */
+ hw->max_rates = IEEE80211_TX_MAX_RATES;
+ hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */
+
+ skb_queue_head_init(&mac->ack_wait_queue);
+ mac->ack_pending = 0;
+
+ plfxlc_chip_init(&mac->chip, hw, intf);
+
+ SET_IEEE80211_DEV(hw, &intf->dev);
+ return hw;
+}
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h
new file mode 100644
index 000000000000..49b92413729b
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#ifndef PLFXLC_MAC_H
+#define PLFXLC_MAC_H
+
+#include <linux/kernel.h>
+#include <net/mac80211.h>
+
+#include "chip.h"
+
+#define PURELIFI_CCK 0x00
+#define PURELIFI_OFDM 0x10
+#define PURELIFI_CCK_PREA_SHORT 0x20
+
+#define PURELIFI_OFDM_PLCP_RATE_6M 0xb
+#define PURELIFI_OFDM_PLCP_RATE_9M 0xf
+#define PURELIFI_OFDM_PLCP_RATE_12M 0xa
+#define PURELIFI_OFDM_PLCP_RATE_18M 0xe
+#define PURELIFI_OFDM_PLCP_RATE_24M 0x9
+#define PURELIFI_OFDM_PLCP_RATE_36M 0xd
+#define PURELIFI_OFDM_PLCP_RATE_48M 0x8
+#define PURELIFI_OFDM_PLCP_RATE_54M 0xc
+
+#define PURELIFI_CCK_RATE_1M (PURELIFI_CCK | 0x00)
+#define PURELIFI_CCK_RATE_2M (PURELIFI_CCK | 0x01)
+#define PURELIFI_CCK_RATE_5_5M (PURELIFI_CCK | 0x02)
+#define PURELIFI_CCK_RATE_11M (PURELIFI_CCK | 0x03)
+#define PURELIFI_OFDM_RATE_6M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_6M)
+#define PURELIFI_OFDM_RATE_9M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_9M)
+#define PURELIFI_OFDM_RATE_12M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_12M)
+#define PURELIFI_OFDM_RATE_18M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_18M)
+#define PURELIFI_OFDM_RATE_24M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_24M)
+#define PURELIFI_OFDM_RATE_36M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_36M)
+#define PURELIFI_OFDM_RATE_48M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_48M)
+#define PURELIFI_OFDM_RATE_54M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_54M)
+
+#define PURELIFI_RX_ERROR 0x80
+#define PURELIFI_RX_CRC32_ERROR 0x10
+
+#define PLF_REGDOMAIN_FCC 0x10
+#define PLF_REGDOMAIN_IC 0x20
+#define PLF_REGDOMAIN_ETSI 0x30
+#define PLF_REGDOMAIN_SPAIN 0x31
+#define PLF_REGDOMAIN_FRANCE 0x32
+#define PLF_REGDOMAIN_JAPAN_2 0x40
+#define PLF_REGDOMAIN_JAPAN 0x41
+#define PLF_REGDOMAIN_JAPAN_3 0x49
+
+#define PLF_RX_ERROR 0x80
+#define PLF_RX_CRC32_ERROR 0x10
+
+enum {
+ MODULATION_RATE_BPSK_1_2 = 0,
+ MODULATION_RATE_BPSK_3_4,
+ MODULATION_RATE_QPSK_1_2,
+ MODULATION_RATE_QPSK_3_4,
+ MODULATION_RATE_QAM16_1_2,
+ MODULATION_RATE_QAM16_3_4,
+ MODULATION_RATE_QAM64_1_2,
+ MODULATION_RATE_QAM64_3_4,
+ MODULATION_RATE_AUTO,
+ MODULATION_RATE_NUM
+};
+
+#define plfxlc_mac_dev(mac) plfxlc_chip_dev(&(mac)->chip)
+
+#define PURELIFI_MAC_STATS_BUFFER_SIZE 16
+#define PURELIFI_MAC_MAX_ACK_WAITERS 50
+
+struct plfxlc_ctrlset {
+ /* id should be plf_usb_req_enum */
+ __be32 id;
+ __be32 len;
+ u8 modulation;
+ u8 control;
+ u8 service;
+ u8 pad;
+ __le16 packet_length;
+ __le16 current_length;
+ __le16 next_frame_length;
+ __le16 tx_length;
+ __be32 payload_len_nw;
+} __packed;
+
+/* overlay */
+struct plfxlc_header {
+ struct plfxlc_ctrlset plf_ctrl;
+ u32 frametype;
+ u8 *dmac;
+} __packed;
+
+struct tx_status {
+ u8 type;
+ u8 id;
+ u8 rate;
+ u8 pad;
+ u8 mac[ETH_ALEN];
+ u8 retry;
+ u8 failure;
+} __packed;
+
+struct beacon {
+ struct delayed_work watchdog_work;
+ struct sk_buff *cur_beacon;
+ unsigned long last_update;
+ u16 interval;
+ u8 period;
+};
+
+enum plfxlc_device_flags {
+ PURELIFI_DEVICE_RUNNING,
+};
+
+struct plfxlc_mac {
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ struct beacon beacon;
+ struct work_struct set_rts_cts_work;
+ struct work_struct process_intr;
+ struct plfxlc_mc_hash multicast_hash;
+ struct sk_buff_head ack_wait_queue;
+ struct ieee80211_channel channels[14];
+ struct ieee80211_rate rates[12];
+ struct ieee80211_supported_band band;
+ struct plfxlc_chip chip;
+ spinlock_t lock; /* lock for mac data */
+ u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
+ char serial_number[PURELIFI_SERIAL_LEN];
+ unsigned char hw_address[ETH_ALEN];
+ u8 default_regdomain;
+ unsigned long flags;
+ bool pass_failed_fcs;
+ bool pass_ctrl;
+ bool ack_pending;
+ int ack_signal;
+ int associated;
+ u8 regdomain;
+ u8 channel;
+ int type;
+ u64 crc_errors;
+ u64 rssi;
+};
+
+static inline struct plfxlc_mac *
+plfxlc_hw_mac(struct ieee80211_hw *hw)
+{
+ return hw->priv;
+}
+
+static inline struct plfxlc_mac *
+plfxlc_chip_to_mac(struct plfxlc_chip *chip)
+{
+ return container_of(chip, struct plfxlc_mac, chip);
+}
+
+static inline struct plfxlc_mac *
+plfxlc_usb_to_mac(struct plfxlc_usb *usb)
+{
+ return plfxlc_chip_to_mac(plfxlc_usb_to_chip(usb));
+}
+
+static inline u8 *plfxlc_mac_get_perm_addr(struct plfxlc_mac *mac)
+{
+ return mac->hw->wiphy->perm_addr;
+}
+
+struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf);
+void plfxlc_mac_release(struct plfxlc_mac *mac);
+
+int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address);
+int plfxlc_mac_init_hw(struct ieee80211_hw *hw);
+
+int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer,
+ unsigned int length);
+void plfxlc_mac_tx_failed(struct urb *urb);
+void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error);
+int plfxlc_op_start(struct ieee80211_hw *hw);
+void plfxlc_op_stop(struct ieee80211_hw *hw);
+int plfxlc_restore_settings(struct plfxlc_mac *mac);
+
+#endif /* PLFXLC_MAC_H */
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
new file mode 100644
index 000000000000..d0e98b2f1365
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -0,0 +1,891 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+#include <linux/sysfs.h>
+
+#include "mac.h"
+#include "usb.h"
+#include "chip.h"
+
+static const struct usb_device_id usb_ids[] = {
+ { USB_DEVICE(PURELIFI_X_VENDOR_ID_0, PURELIFI_X_PRODUCT_ID_0),
+ .driver_info = DEVICE_LIFI_X },
+ { USB_DEVICE(PURELIFI_XC_VENDOR_ID_0, PURELIFI_XC_PRODUCT_ID_0),
+ .driver_info = DEVICE_LIFI_XC },
+ { USB_DEVICE(PURELIFI_XL_VENDOR_ID_0, PURELIFI_XL_PRODUCT_ID_0),
+ .driver_info = DEVICE_LIFI_XL },
+ {}
+};
+
+void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ u8 last_served_sidx;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ last_served_sidx = usb->sidx;
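+ /* Round-robin over the per-station queues, starting after the last
+ * served index, and pick the first connected, non-full station that
+ * has a queued frame.
+ */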
+ do {
+ usb->sidx = (usb->sidx + 1) % MAX_STA_NUM;
+ if (!(tx->station[usb->sidx].flag & STATION_CONNECTED_FLAG))
+ continue;
+ if (!(tx->station[usb->sidx].flag & STATION_FIFO_FULL_FLAG))
+ skb = skb_peek(&tx->station[usb->sidx].data_list);
+ } while ((usb->sidx != last_served_sidx) && (!skb));
+
+ if (skb) {
+ skb = skb_dequeue(&tx->station[usb->sidx].data_list);
+ plfxlc_usb_wreq_async(usb, skb->data, skb->len, USB_REQ_DATA_TX,
+ plfxlc_tx_urb_complete, skb);
+ if (skb_queue_len(&tx->station[usb->sidx].data_list) <= 60)
+ ieee80211_wake_queues(plfxlc_usb_to_hw(usb));
+ }
+ spin_unlock_irqrestore(&tx->lock, flags);
+}
+
+static void handle_rx_packet(struct plfxlc_usb *usb, const u8 *buffer,
+ unsigned int length)
+{
+ plfxlc_mac_rx(plfxlc_usb_to_hw(usb), buffer, length);
+}
+
+static void rx_urb_complete(struct urb *urb)
+{
+ struct plfxlc_usb_tx *tx;
+ struct plfxlc_usb *usb;
+ unsigned int length;
+ const u8 *buffer;
+ u16 status;
+ u8 sidx;
+ int r;
+
+ if (!urb) {
+ pr_err("urb is NULL\n");
+ return;
+ }
+ if (!urb->context) {
+ pr_err("urb ctx is NULL\n");
+ return;
+ }
+ usb = urb->context;
+
+ if (usb->initialized != 1) {
+ pr_err("usb is not initialized\n");
+ return;
+ }
+
+ tx = &usb->tx;
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ case -EINVAL:
+ case -ENODEV:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ return;
+ default:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ if (tx->submitted_urbs++ < PURELIFI_URB_RETRY_MAX) {
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit %d", urb,
+ tx->submitted_urbs++);
+ goto resubmit;
+ } else {
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p max resubmits reached", urb);
+ tx->submitted_urbs = 0;
+ return;
+ }
+ }
+
+ buffer = urb->transfer_buffer;
+ length = le32_to_cpu(*(__le32 *)(buffer + sizeof(struct rx_status)))
+ + sizeof(u32);
+
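+ /* An 8-byte transfer (PLF_MSG_STATUS_OFFSET + 1) carries a status
+ * message from the device; anything else is treated as a data packet.
+ */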
+ if (urb->actual_length != (PLF_MSG_STATUS_OFFSET + 1)) {
+ if (usb->initialized && usb->link_up)
+ handle_rx_packet(usb, buffer, length);
+ goto resubmit;
+ }
+
+ status = buffer[PLF_MSG_STATUS_OFFSET];
+
+ switch (status) {
+ case STATION_FIFO_ALMOST_FULL_NOT_MESSAGE:
+ dev_dbg(&usb->intf->dev,
+ "FIFO full not packet receipt\n");
+ tx->mac_fifo_full = 1;
+ for (sidx = 0; sidx < MAX_STA_NUM; sidx++)
+ tx->station[sidx].flag |= STATION_FIFO_FULL_FLAG;
+ break;
+ case STATION_FIFO_ALMOST_FULL_MESSAGE:
+ dev_dbg(&usb->intf->dev, "FIFO full packet receipt\n");
+
+ for (sidx = 0; sidx < MAX_STA_NUM; sidx++)
+ tx->station[sidx].flag &= STATION_ACTIVE_FLAG;
+
+ plfxlc_send_packet_from_data_queue(usb);
+ break;
+ case STATION_CONNECT_MESSAGE:
+ usb->link_up = 1;
+ dev_dbg(&usb->intf->dev, "ST_CONNECT_MSG packet receipt\n");
+ break;
+ case STATION_DISCONNECT_MESSAGE:
+ usb->link_up = 0;
+ dev_dbg(&usb->intf->dev, "ST_DISCONN_MSG packet receipt\n");
+ break;
+ default:
+ dev_dbg(&usb->intf->dev, "Unknown packet receipt\n");
+ break;
+ }
+
+resubmit:
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r)
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit fail (%d)\n", urb, r);
+}
+
+static struct urb *alloc_rx_urb(struct plfxlc_usb *usb)
+{
+ struct usb_device *udev = plfxlc_usb_to_usbdev(usb);
+ struct urb *urb;
+ void *buffer;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return NULL;
+
+ buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buffer) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
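+ /* The buffer is allocated DMA-coherent, so tell the USB core not to
+ * map it again (URB_NO_TRANSFER_DMA_MAP below).
+ */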
+ usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
+ buffer, USB_MAX_RX_SIZE,
+ rx_urb_complete, usb);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ return urb;
+}
+
+static void free_rx_urb(struct urb *urb)
+{
+ if (!urb)
+ return;
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+}
+
+static int __lf_x_usb_enable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+ struct urb **urbs;
+ int i, r;
+
+ r = -ENOMEM;
+ urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL);
+ if (!urbs)
+ goto error;
+
+ for (i = 0; i < RX_URBS_COUNT; i++) {
+ urbs[i] = alloc_rx_urb(usb);
+ if (!urbs[i])
+ goto error;
+ }
+
+ spin_lock_irq(&rx->lock);
+
+ dev_dbg(plfxlc_usb_dev(usb), "irq_disabled %d\n", irqs_disabled());
+
+ if (rx->urbs) {
+ spin_unlock_irq(&rx->lock);
+ r = 0;
+ goto error;
+ }
+ rx->urbs = urbs;
+ rx->urbs_count = RX_URBS_COUNT;
+ spin_unlock_irq(&rx->lock);
+
+ for (i = 0; i < RX_URBS_COUNT; i++) {
+ r = usb_submit_urb(urbs[i], GFP_KERNEL);
+ if (r)
+ goto error_submit;
+ }
+
+ return 0;
+
+error_submit:
+ for (i = 0; i < RX_URBS_COUNT; i++)
+ usb_kill_urb(urbs[i]);
+ spin_lock_irq(&rx->lock);
+ rx->urbs = NULL;
+ rx->urbs_count = 0;
+ spin_unlock_irq(&rx->lock);
+error:
+ if (urbs) {
+ for (i = 0; i < RX_URBS_COUNT; i++)
+ free_rx_urb(urbs[i]);
+ }
+ return r;
+}
+
+int plfxlc_usb_enable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+ int r;
+
+ mutex_lock(&rx->setup_mutex);
+ r = __lf_x_usb_enable_rx(usb);
+ if (!r)
+ usb->rx_usb_enabled = 1;
+
+ mutex_unlock(&rx->setup_mutex);
+
+ return r;
+}
+
+static void __lf_x_usb_disable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+ unsigned long flags;
+ unsigned int count;
+ struct urb **urbs;
+ int i;
+
+ spin_lock_irqsave(&rx->lock, flags);
+ urbs = rx->urbs;
+ count = rx->urbs_count;
+ spin_unlock_irqrestore(&rx->lock, flags);
+
+ if (!urbs)
+ return;
+
+ for (i = 0; i < count; i++) {
+ usb_kill_urb(urbs[i]);
+ free_rx_urb(urbs[i]);
+ }
+ kfree(urbs);
+ rx->urbs = NULL;
+ rx->urbs_count = 0;
+}
+
+void plfxlc_usb_disable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ __lf_x_usb_disable_rx(usb);
+ usb->rx_usb_enabled = 0;
+ mutex_unlock(&rx->setup_mutex);
+}
+
+void plfxlc_usb_disable_tx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ unsigned long flags;
+
+ clear_bit(PLF_BIT_ENABLED, &tx->enabled);
+
+ /* kill all submitted tx-urbs */
+ usb_kill_anchored_urbs(&tx->submitted);
+
+ spin_lock_irqsave(&tx->lock, flags);
+ WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+ WARN_ON(tx->submitted_urbs != 0);
+ tx->submitted_urbs = 0;
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ /* The stopped state is ignored, relying on ieee80211_wake_queues()
+ * in a potentially following plfxlc_usb_enable_tx().
+ */
+}
+
+void plfxlc_usb_enable_tx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ set_bit(PLF_BIT_ENABLED, &tx->enabled);
+ tx->submitted_urbs = 0;
+ ieee80211_wake_queues(plfxlc_usb_to_hw(usb));
+ tx->stopped = 0;
+ spin_unlock_irqrestore(&tx->lock, flags);
+}
+
+void plfxlc_tx_urb_complete(struct urb *urb)
+{
+ struct ieee80211_tx_info *info;
+ struct plfxlc_usb *usb;
+ struct sk_buff *skb;
+
+ skb = urb->context;
+ info = IEEE80211_SKB_CB(skb);
+ /* grab 'usb' pointer before handing off the skb (since
+ * it might be freed by plfxlc_mac_tx_to_dev or mac80211)
+ */
+ usb = &plfxlc_hw_mac(info->rate_driver_data[0])->chip.usb;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ case -EINVAL:
+ case -ENODEV:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ break;
+ default:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ return;
+ }
+
+ plfxlc_mac_tx_to_dev(skb, urb->status);
+ plfxlc_send_packet_from_data_queue(usb);
+ usb_free_urb(urb);
+}
+
+static inline void init_usb_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+
+ spin_lock_init(&rx->lock);
+ mutex_init(&rx->setup_mutex);
+
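+ /* Bulk max packet size: 512 bytes at high speed, 64 otherwise. */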
+ if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH)
+ rx->usb_packet_size = 512;
+ else
+ rx->usb_packet_size = 64;
+
+ if (rx->fragment_length != 0)
+ dev_dbg(plfxlc_usb_dev(usb), "fragment_length error\n");
+}
+
+static inline void init_usb_tx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+
+ spin_lock_init(&tx->lock);
+ clear_bit(PLF_BIT_ENABLED, &tx->enabled);
+ tx->stopped = 0;
+ skb_queue_head_init(&tx->submitted_skbs);
+ init_usb_anchor(&tx->submitted);
+}
+
+void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw,
+ struct usb_interface *intf)
+{
+ memset(usb, 0, sizeof(*usb));
+ usb->intf = usb_get_intf(intf);
+ usb_set_intfdata(usb->intf, hw);
+ init_usb_tx(usb);
+ init_usb_rx(usb);
+}
+
+void plfxlc_usb_release(struct plfxlc_usb *usb)
+{
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_usb_disable_tx(usb);
+ plfxlc_usb_disable_rx(usb);
+ usb_set_intfdata(usb->intf, NULL);
+ usb_put_intf(usb->intf);
+}
+
+const char *plfxlc_speed(enum usb_device_speed speed)
+{
+ switch (speed) {
+ case USB_SPEED_LOW:
+ return "low";
+ case USB_SPEED_FULL:
+ return "full";
+ case USB_SPEED_HIGH:
+ return "high";
+ default:
+ return "unknown";
+ }
+}
+
+int plfxlc_usb_init_hw(struct plfxlc_usb *usb)
+{
+ int r;
+
+ r = usb_reset_configuration(plfxlc_usb_to_usbdev(usb));
+ if (r) {
+ dev_err(plfxlc_usb_dev(usb), "cfg reset failed (%d)\n", r);
+ return r;
+ }
+ return 0;
+}
+
+static void get_usb_req(struct usb_device *udev, void *buffer,
+ u32 buffer_len, enum plf_usb_req_enum usb_req_id,
+ struct plf_usb_req *usb_req)
+{
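+ /* Build the bulk request: big-endian id and total length, followed
+ * by the payload (prefixed with its length for beacon writes), a
+ * zeroed FCS and padding up to a 4-byte boundary.
+ */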
+ __be32 payload_len_nw = cpu_to_be32(buffer_len + FCS_LEN);
+ const u8 *buffer_src_p = buffer;
+ u8 *buffer_dst = usb_req->buf;
+ u32 temp_usb_len = 0;
+
+ usb_req->id = cpu_to_be32(usb_req_id);
+ usb_req->len = cpu_to_be32(0);
+
+ /* Copy buffer length into the transmitted buffer, as it is important
+ * for the Rx MAC to know its exact length.
+ */
+ if (usb_req->id == cpu_to_be32(USB_REQ_BEACON_WR)) {
+ memcpy(buffer_dst, &payload_len_nw, sizeof(payload_len_nw));
+ buffer_dst += sizeof(payload_len_nw);
+ temp_usb_len += sizeof(payload_len_nw);
+ }
+
+ memcpy(buffer_dst, buffer_src_p, buffer_len);
+ buffer_dst += buffer_len;
+ buffer_src_p += buffer_len;
+ temp_usb_len += buffer_len;
+
+ /* Zero the FCS_LEN (4) bytes used for CRC checking. */
+ memset(buffer_dst, 0, FCS_LEN);
+ buffer_dst += FCS_LEN;
+ temp_usb_len += FCS_LEN;
+
+ /* Pad the packet to be transmitted to a multiple of 4 bytes. */
+ if (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT) {
+ memset(buffer_dst, 0, PURELIFI_BYTE_NUM_ALIGNMENT -
+ (temp_usb_len %
+ PURELIFI_BYTE_NUM_ALIGNMENT));
+ buffer_dst += PURELIFI_BYTE_NUM_ALIGNMENT -
+ (temp_usb_len %
+ PURELIFI_BYTE_NUM_ALIGNMENT);
+ temp_usb_len += PURELIFI_BYTE_NUM_ALIGNMENT -
+ (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT);
+ }
+
+ usb_req->len = cpu_to_be32(temp_usb_len);
+}
+
+int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+ int buffer_len, enum plf_usb_req_enum usb_req_id,
+ usb_complete_t complete_fn,
+ void *context)
+{
+ struct usb_device *udev = interface_to_usbdev(usb->ez_usb);
+ struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
+ int r;
+
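+ /* The caller has already prepended the request header, so the buffer
+ * is sent as-is on the bulk OUT endpoint.
+ */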
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
+ (void *)buffer, buffer_len, complete_fn, context);
+
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r)
+ dev_err(&udev->dev, "Async write submit failed (%d)\n", r);
+
+ return r;
+}
+
+int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len,
+ enum plf_usb_req_enum usb_req_id)
+{
+ struct usb_device *udev = interface_to_usbdev(ez_usb);
+ unsigned char *dma_buffer = NULL;
+ struct plf_usb_req usb_req;
+ int usb_bulk_msg_len;
+ int actual_length;
+ int r;
+
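+ /* Wrap the payload in a plf_usb_req and send it synchronously on the
+ * bulk OUT endpoint.
+ */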
+ get_usb_req(udev, buffer, buffer_len, usb_req_id, &usb_req);
+ usb_bulk_msg_len = sizeof(__le32) + sizeof(__le32) +
+ be32_to_cpu(usb_req.len);
+
+ dma_buffer = kmemdup(&usb_req, usb_bulk_msg_len, GFP_KERNEL);
+
+ if (!dma_buffer) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, EP_DATA_OUT),
+ dma_buffer, usb_bulk_msg_len,
+ &actual_length, USB_BULK_MSG_TIMEOUT_MS);
+ kfree(dma_buffer);
+error:
+ if (r) {
+ r = -ENOMEM;
+ dev_err(&udev->dev, "usb_bulk_msg failed (%d)\n", r);
+ }
+
+ return r;
+}
+
+static void slif_data_plane_sap_timer_callb(struct timer_list *t)
+{
+ struct plfxlc_usb *usb = from_timer(usb, t, tx.tx_retry_timer);
+
+ plfxlc_send_packet_from_data_queue(usb);
+ timer_setup(&usb->tx.tx_retry_timer,
+ slif_data_plane_sap_timer_callb, 0);
+ mod_timer(&usb->tx.tx_retry_timer, jiffies + TX_RETRY_BACKOFF_JIFF);
+}
+
+static void sta_queue_cleanup_timer_callb(struct timer_list *t)
+{
+ struct plfxlc_usb *usb = from_timer(usb, t, sta_queue_cleanup);
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ int sidx;
+
+ for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) {
+ if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG))
+ continue;
+ if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) {
+ tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG;
+ } else {
+ memset(tx->station[sidx].mac, 0, ETH_ALEN);
+ tx->station[sidx].flag = 0;
+ }
+ }
+ timer_setup(&usb->sta_queue_cleanup,
+ sta_queue_cleanup_timer_callb, 0);
+ mod_timer(&usb->sta_queue_cleanup, jiffies + STA_QUEUE_CLEANUP_JIFF);
+}
+
+static int probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ u8 serial_number[PURELIFI_SERIAL_LEN];
+ struct ieee80211_hw *hw = NULL;
+ struct plfxlc_usb_tx *tx;
+ struct plfxlc_chip *chip;
+ struct plfxlc_usb *usb;
+ u8 hw_address[ETH_ALEN];
+ unsigned int i;
+ int r = 0;
+
+ hw = plfxlc_mac_alloc_hw(intf);
+
+ if (!hw) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ chip = &plfxlc_hw_mac(hw)->chip;
+ usb = &chip->usb;
+ usb->ez_usb = intf;
+ tx = &usb->tx;
+
+ r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number);
+ if (r) {
+ dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r);
+ goto error;
+ }
+
+ chip->unit_type = STA;
+ dev_err(&intf->dev, "Unit type is station");
+
+ r = plfxlc_mac_preinit_hw(hw, hw_address);
+ if (r) {
+ dev_err(&intf->dev, "Init mac failed (%d)\n", r);
+ goto error;
+ }
+
+ r = ieee80211_register_hw(hw);
+ if (r) {
+ dev_err(&intf->dev, "Register device failed (%d)\n", r);
+ goto error;
+ }
+
+ if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) ==
+ PURELIFI_XL_VENDOR_ID_0) &&
+ (le16_to_cpu(interface_to_usbdev(intf)->descriptor.idProduct) ==
+ PURELIFI_XL_PRODUCT_ID_0)) {
+ r = plfxlc_download_xl_firmware(intf);
+ } else {
+ r = plfxlc_download_fpga(intf);
+ }
+ if (r != 0) {
+ dev_err(&intf->dev, "FPGA download failed (%d)\n", r);
+ goto error;
+ }
+
+ tx->mac_fifo_full = 0;
+ spin_lock_init(&tx->lock);
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_usb_init_hw(usb);
+ if (r < 0) {
+ dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r);
+ goto error;
+ }
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON);
+ if (r < 0) {
+ dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r);
+ goto error;
+ }
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_chip_set_rate(chip, 8);
+ if (r < 0) {
+ dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r);
+ goto error;
+ }
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_usb_wreq(usb->ez_usb,
+ hw_address, ETH_ALEN, USB_REQ_MAC_WR);
+ if (r < 0) {
+ dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r);
+ goto error;
+ }
+
+ plfxlc_chip_enable_rxtx(chip);
+
+ /* Initialise the data plane Tx queue */
+ for (i = 0; i < MAX_STA_NUM; i++) {
+ skb_queue_head_init(&tx->station[i].data_list);
+ tx->station[i].flag = 0;
+ }
+
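+ /* The broadcast slot is always marked connected and holds the
+ * broadcast MAC address.
+ */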
+ tx->station[STA_BROADCAST_INDEX].flag |= STATION_CONNECTED_FLAG;
+ for (i = 0; i < ETH_ALEN; i++)
+ tx->station[STA_BROADCAST_INDEX].mac[i] = 0xFF;
+
+ timer_setup(&tx->tx_retry_timer, slif_data_plane_sap_timer_callb, 0);
+ tx->tx_retry_timer.expires = jiffies + TX_RETRY_BACKOFF_JIFF;
+ add_timer(&tx->tx_retry_timer);
+
+ timer_setup(&usb->sta_queue_cleanup,
+ sta_queue_cleanup_timer_callb, 0);
+ usb->sta_queue_cleanup.expires = jiffies + STA_QUEUE_CLEANUP_JIFF;
+ add_timer(&usb->sta_queue_cleanup);
+
+ plfxlc_mac_init_hw(hw);
+ usb->initialized = true;
+ return 0;
+error:
+ if (hw) {
+ plfxlc_mac_release(plfxlc_hw_mac(hw));
+ ieee80211_unregister_hw(hw);
+ ieee80211_free_hw(hw);
+ }
+ dev_err(&intf->dev, "pureLifi:Device error");
+ return r;
+}
+
+static void disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf);
+ struct plfxlc_mac *mac;
+ struct plfxlc_usb *usb;
+
+ /* Either something really bad happened, or
+ * we're just dealing with a DEVICE_INSTALLER.
+ */
+ if (!hw)
+ return;
+
+ mac = plfxlc_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ del_timer_sync(&usb->tx.tx_retry_timer);
+ del_timer_sync(&usb->sta_queue_cleanup);
+
+ ieee80211_unregister_hw(hw);
+
+ plfxlc_chip_disable_rxtx(&mac->chip);
+
+ /* If the disconnect has been caused by a removal of the
+ * driver module, the reset allows reloading of the driver. If the
+ * reset will not be executed here, the upload of the firmware in the
+ * probe function caused by the reloading of the driver will fail.
+ */
+ usb_reset_device(interface_to_usbdev(intf));
+
+ plfxlc_mac_release(mac);
+ ieee80211_free_hw(hw);
+}
+
+static void plfxlc_usb_resume(struct plfxlc_usb *usb)
+{
+ struct plfxlc_mac *mac = plfxlc_usb_to_mac(usb);
+ int r;
+
+ r = plfxlc_op_start(plfxlc_usb_to_hw(usb));
+ if (r < 0) {
+ dev_warn(plfxlc_usb_dev(usb),
+ "Device resume failed (%d)\n", r);
+
+ if (usb->was_running)
+ set_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+
+ usb_queue_reset_device(usb->intf);
+ return;
+ }
+
+ if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+ r = plfxlc_restore_settings(mac);
+ if (r < 0) {
+ dev_dbg(plfxlc_usb_dev(usb),
+ "Restore failed (%d)\n", r);
+ return;
+ }
+ }
+}
+
+static void plfxlc_usb_stop(struct plfxlc_usb *usb)
+{
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_usb_disable_tx(usb);
+ plfxlc_usb_disable_rx(usb);
+
+ usb->initialized = false;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct plfxlc_mac *mac;
+ struct plfxlc_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = plfxlc_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ usb->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+
+ plfxlc_usb_stop(usb);
+
+ return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct plfxlc_mac *mac;
+ struct plfxlc_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = plfxlc_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ if (usb->was_running)
+ plfxlc_usb_resume(usb);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static struct plfxlc_usb *get_plfxlc_usb(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf);
+ struct plfxlc_mac *mac;
+
+ /* Either something really bad happened, or
+ * we're just dealing with a DEVICE_INSTALLER.
+ */
+ if (!hw)
+ return NULL;
+
+ mac = plfxlc_hw_mac(hw);
+ return &mac->chip.usb;
+}
+
+static int suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct plfxlc_usb *pl = get_plfxlc_usb(interface);
+ struct plfxlc_mac *mac = plfxlc_usb_to_mac(pl);
+
+ if (!pl || !plfxlc_usb_dev(pl))
+ return -ENODEV;
+ if (pl->initialized == 0)
+ return 0;
+ pl->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+ plfxlc_usb_stop(pl);
+ return 0;
+}
+
+static int resume(struct usb_interface *interface)
+{
+ struct plfxlc_usb *pl = get_plfxlc_usb(interface);
+
+ if (!pl || !plfxlc_usb_dev(pl))
+ return -ENODEV;
+ if (pl->was_running)
+ plfxlc_usb_resume(pl);
+ return 0;
+}
+
+#endif
+
+static struct usb_driver driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = usb_ids,
+ .probe = probe,
+ .disconnect = disconnect,
+ .pre_reset = pre_reset,
+ .post_reset = post_reset,
+#ifdef CONFIG_PM
+ .suspend = suspend,
+ .resume = resume,
+#endif
+ .disable_hub_initiated_lpm = 1,
+};
+
+static int __init usb_init(void)
+{
+ int r;
+
+ r = usb_register(&driver);
+ if (r) {
+ pr_err("%s usb_register() failed %d\n", driver.name, r);
+ return r;
+ }
+
+ pr_debug("Driver initialized :%s\n", driver.name);
+ return 0;
+}
+
+static void __exit usb_exit(void)
+{
+ usb_deregister(&driver);
+ pr_debug("%s %s\n", driver.name, __func__);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB driver for pureLiFi devices");
+MODULE_AUTHOR("pureLiFi");
+MODULE_VERSION("1.0");
+MODULE_FIRMWARE("plfxlc/lifi-x.bin");
+MODULE_DEVICE_TABLE(usb, usb_ids);
+
+module_init(usb_init);
+module_exit(usb_exit);
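The probe path above arms two self-rearming timers (tx_retry_timer and sta_queue_cleanup) by calling timer_setup(), setting ->expires by hand and then add_timer(). Below is a minimal, self-contained sketch of that timer pattern with hypothetical names (demo_ctx, demo_retry_cb); it only illustrates the timer_setup()/mod_timer()/del_timer_sync() API and is not part of this patch.

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/timer.h>

struct demo_ctx {
	struct timer_list retry_timer;	/* hypothetical periodic timer */
};

static struct demo_ctx demo;

/* Runs in timer (softirq) context; re-arm to make the timer periodic. */
static void demo_retry_cb(struct timer_list *t)
{
	struct demo_ctx *ctx = from_timer(ctx, t, retry_timer);

	/* ... periodic work goes here ... */
	mod_timer(&ctx->retry_timer, jiffies + msecs_to_jiffies(10));
}

static int __init demo_init(void)
{
	timer_setup(&demo.retry_timer, demo_retry_cb, 0);
	/* First expiry 10 ms from now, like TX_RETRY_BACKOFF_JIFF above. */
	mod_timer(&demo.retry_timer, jiffies + msecs_to_jiffies(10));
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo.retry_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Timer pattern sketch");

mod_timer() both sets the new expiry and activates the timer, so the callback can re-arm itself without touching ->expires or calling add_timer() again; del_timer_sync() in the exit path waits for a running callback to finish before the module goes away.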
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.h b/drivers/net/wireless/purelifi/plfxlc/usb.h
new file mode 100644
index 000000000000..ba2defd593bd
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#ifndef PLFXLC_USB_H
+#define PLFXLC_USB_H
+
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+
+#include "intf.h"
+
+#define USB_BULK_MSG_TIMEOUT_MS 2000
+
+#define PURELIFI_X_VENDOR_ID_0 0x16C1
+#define PURELIFI_X_PRODUCT_ID_0 0x1CDE
+#define PURELIFI_XC_VENDOR_ID_0 0x2EF5
+#define PURELIFI_XC_PRODUCT_ID_0 0x0008
+#define PURELIFI_XL_VENDOR_ID_0 0x2EF5
+#define PURELIFI_XL_PRODUCT_ID_0 0x000A /* Station */
+
+#define PLF_FPGA_STATUS_LEN 2
+#define PLF_FPGA_STATE_LEN 9
+#define PLF_BULK_TLEN 16384
+#define PLF_FPGA_MG 6 /* Magic check */
+#define PLF_XL_BUF_LEN 64
+#define PLF_MSG_STATUS_OFFSET 7
+
+#define PLF_USB_TIMEOUT 1000
+#define PLF_MSLEEP_TIME 200
+
+#define PURELIFI_URB_RETRY_MAX 5
+
+#define plfxlc_usb_dev(usb) (&(usb)->intf->dev)
+
+/* Tx retry backoff and station queue cleanup periods (in milliseconds) */
+#define TX_RETRY_BACKOFF_MS 10
+#define STA_QUEUE_CLEANUP_MS 5000
+
+/* The same periods expressed in jiffies */
+#define TX_RETRY_BACKOFF_JIFF ((TX_RETRY_BACKOFF_MS * HZ) / 1000)
+#define STA_QUEUE_CLEANUP_JIFF ((STA_QUEUE_CLEANUP_MS * HZ) / 1000)
+
+/* Ensures that MAX_TRANSFER_SIZE is even. */
+#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1)
+#define plfxlc_urb_dev(urb) (&(urb)->dev->dev)
+
+#define STATION_FIFO_ALMOST_FULL_MESSAGE 0
+#define STATION_FIFO_ALMOST_FULL_NOT_MESSAGE 1
+#define STATION_CONNECT_MESSAGE 2
+#define STATION_DISCONNECT_MESSAGE 3
+
+int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len,
+ enum plf_usb_req_enum usb_req_id);
+void plfxlc_tx_urb_complete(struct urb *urb);
+
+enum {
+ USB_MAX_RX_SIZE = 4800,
+ USB_MAX_EP_INT_BUFFER = 64,
+};
+
+struct plfxlc_usb_interrupt {
+ spinlock_t lock; /* spin lock for usb interrupt buffer */
+ struct urb *urb;
+ void *buffer;
+ int interval;
+};
+
+#define RX_URBS_COUNT 5
+
+struct plfxlc_usb_rx {
+ spinlock_t lock; /* spin lock for rx urb */
+ struct mutex setup_mutex; /* mutex lock for rx urb */
+ u8 fragment[2 * USB_MAX_RX_SIZE];
+ unsigned int fragment_length;
+ unsigned int usb_packet_size;
+ struct urb **urbs;
+ int urbs_count;
+};
+
+struct plf_station {
+ /* 7...3 | 2 | 1 | 0 |
+ * Reserved | Heartbeat | FIFO full | Connected |
+ */
+ unsigned char flag;
+ unsigned char mac[ETH_ALEN];
+ struct sk_buff_head data_list;
+};
+
+struct plfxlc_firmware_file {
+ u32 total_files;
+ u32 total_size;
+ u32 size;
+ u32 start_addr;
+ u32 control_packets;
+} __packed;
+
+#define STATION_CONNECTED_FLAG 0x1
+#define STATION_FIFO_FULL_FLAG 0x2
+#define STATION_HEARTBEAT_FLAG 0x4
+#define STATION_ACTIVE_FLAG 0xFD
+
+#define PURELIFI_SERIAL_LEN 256
+#define STA_BROADCAST_INDEX (AP_USER_LIMIT)
+#define MAX_STA_NUM (AP_USER_LIMIT + 1)
+
+struct plfxlc_usb_tx {
+ unsigned long enabled;
+ spinlock_t lock; /* spinlock for USB tx */
+ u8 mac_fifo_full;
+ struct sk_buff_head submitted_skbs;
+ struct usb_anchor submitted;
+ int submitted_urbs;
+ bool stopped;
+ struct timer_list tx_retry_timer;
+ struct plf_station station[MAX_STA_NUM];
+};
+
+/* Contains the usb parts. The structure doesn't require a lock because intf
+ * will not be changed after initialization.
+ */
+struct plfxlc_usb {
+ struct timer_list sta_queue_cleanup;
+ struct plfxlc_usb_rx rx;
+ struct plfxlc_usb_tx tx;
+ struct usb_interface *intf;
+ struct usb_interface *ez_usb;
+ u8 req_buf[64]; /* plfxlc_usb_iowrite16v needs 62 bytes */
+ u8 sidx; /* index of the last served station */
+ bool rx_usb_enabled;
+ bool initialized;
+ bool was_running;
+ bool link_up;
+};
+
+enum endpoints {
+ EP_DATA_IN = 2,
+ EP_DATA_OUT = 8,
+};
+
+enum devicetype {
+ DEVICE_LIFI_X = 0,
+ DEVICE_LIFI_XC = 1,
+ DEVICE_LIFI_XL = 1,
+};
+
+enum {
+ PLF_BIT_ENABLED = 1,
+ PLF_BIT_MAX = 2,
+};
+
+int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+ int buffer_len, enum plf_usb_req_enum usb_req_id,
+ usb_complete_t complete_fn, void *context);
+
+static inline struct usb_device *
+plfxlc_usb_to_usbdev(struct plfxlc_usb *usb)
+{
+ return interface_to_usbdev(usb->intf);
+}
+
+static inline struct ieee80211_hw *
+plfxlc_intf_to_hw(struct usb_interface *intf)
+{
+ return usb_get_intfdata(intf);
+}
+
+static inline struct ieee80211_hw *
+plfxlc_usb_to_hw(struct plfxlc_usb *usb)
+{
+ return plfxlc_intf_to_hw(usb->intf);
+}
+
+void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw,
+ struct usb_interface *intf);
+void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb);
+void plfxlc_usb_release(struct plfxlc_usb *usb);
+void plfxlc_usb_disable_rx(struct plfxlc_usb *usb);
+void plfxlc_usb_enable_tx(struct plfxlc_usb *usb);
+void plfxlc_usb_disable_tx(struct plfxlc_usb *usb);
+int plfxlc_usb_tx(struct plfxlc_usb *usb, struct sk_buff *skb);
+int plfxlc_usb_enable_rx(struct plfxlc_usb *usb);
+int plfxlc_usb_init_hw(struct plfxlc_usb *usb);
+const char *plfxlc_speed(enum usb_device_speed speed);
+
+/* Firmware declarations */
+int plfxlc_download_xl_firmware(struct usb_interface *intf);
+int plfxlc_download_fpga(struct usb_interface *intf);
+
+int plfxlc_upload_mac_and_serial(struct usb_interface *intf,
+ unsigned char *hw_address,
+ unsigned char *serial_number);
+
+#endif /* PLFXLC_USB_H */
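TX_RETRY_BACKOFF_JIFF and STA_QUEUE_CLEANUP_JIFF above open-code the millisecond-to-jiffies conversion as (ms * HZ) / 1000, which truncates. The kernel's msecs_to_jiffies() helper performs the same conversion but rounds up. The sketch below only demonstrates that difference with a hypothetical helper and is not part of the driver.

#include <linux/jiffies.h>
#include <linux/printk.h>

/* Hypothetical demonstration of the two conversions used above. */
static void demo_show_backoff(void)
{
	unsigned long open_coded = (10 * HZ) / 1000;	/* like TX_RETRY_BACKOFF_JIFF */
	unsigned long helper = msecs_to_jiffies(10);	/* standard helper */

	/* With HZ=1000 both are 10 jiffies; with HZ=250 the open-coded
	 * form truncates to 2 while msecs_to_jiffies() rounds up to 3.
	 */
	pr_info("backoff: open-coded=%lu helper=%lu\n", open_coded, helper);
}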
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index deddb0afd312..cbdaf7992f98 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1801,8 +1801,8 @@ int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* do not have a choice if some connected STA is not capable to
* receive the same amount of data like the others.
*/
- if (sta->ht_cap.ht_supported) {
- drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]++;
+ if (sta->deflink.ht_cap.ht_supported) {
+ drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]++;
rt2800_set_max_psdu_len(rt2x00dev);
}
@@ -1847,8 +1847,8 @@ int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
int wcid = sta_priv->wcid;
- if (sta->ht_cap.ht_supported) {
- drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]--;
+ if (sta->deflink.ht_cap.ht_supported) {
+ drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]--;
rt2800_set_max_psdu_len(rt2x00dev);
}
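The rt2800 hunks above, like most of the Realtek changes that follow, move per-station capability reads from struct ieee80211_sta itself into its deflink member, the station's default-link data added in preparation for multi-link operation. A hedged sketch of reading HT capabilities through deflink is shown below; the helper name is hypothetical and not part of any of these drivers.

#include <net/mac80211.h>

/* Hypothetical helper: true when the station's default link supports HT
 * with a short guard interval on either 20 or 40 MHz.
 */
static bool demo_sta_has_sgi(struct ieee80211_sta *sta)
{
	return sta->deflink.ht_cap.ht_supported &&
	       (sta->deflink.ht_cap.cap &
		(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40));
}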
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index fb1d31b2d52a..aa6b2f3d2eff 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -303,7 +303,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
if (sta) {
sta_priv = sta_to_rt2x00_sta(sta);
txdesc->u.ht.wcid = sta_priv->wcid;
- density = sta->ht_cap.ampdu_density;
+ density = sta->deflink.ht_cap.ampdu_density;
}
/*
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 2477e18c7cae..025619cd14e8 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
+ unsigned int prio = 0;
unsigned long flags;
- unsigned int idx, prio, hw_prio;
+ unsigned int idx, hw_prio;
+
dma_addr_t mapping;
u32 tx_flags;
u8 rc_flags;
@@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
/* do arithmetic and then convert to le16 */
u16 frame_duration = 0;
- prio = skb_get_queue_mapping(skb);
+ /* rtl8180/rtl8185 only have one usable tx queue */
+ if (dev->queues > IEEE80211_AC_BK)
+ prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
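The rtl8180 change above stops trusting skb_get_queue_mapping() when the device registered fewer than the four mac80211 access-category queues, falling back to ring 0 instead. A small sketch of that guard as a stand-alone helper is shown below; the function name is hypothetical.

#include <linux/skbuff.h>
#include <net/mac80211.h>

/* Hypothetical helper: pick a Tx ring index, falling back to ring 0 when
 * the device registered fewer queues than mac80211's four ACs.
 */
static unsigned int demo_pick_ring(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	if (hw->queues > IEEE80211_AC_BK)
		return skb_get_queue_mapping(skb);
	return 0;
}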
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 06d59ffb7444..8b2ca9e8eac6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1607,6 +1607,7 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
+ struct ieee80211_hw *hw = priv->hw;
u32 val32, bonding;
u16 val16;
@@ -1684,6 +1685,9 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->has_wifi = 1;
}
+ hw->wiphy->available_antennas_tx = BIT(priv->tx_paths) - 1;
+ hw->wiphy->available_antennas_rx = BIT(priv->rx_paths) - 1;
+
switch (priv->rtl_chip) {
case RTL8188E:
case RTL8192E:
@@ -4282,6 +4286,17 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
rtl8xxxu_debug = tmp_debug;
}
+static
+int rtl8xxxu_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ *tx_ant = BIT(priv->tx_paths) - 1;
+ *rx_ant = BIT(priv->rx_paths) - 1;
+
+ return 0;
+}
+
static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, const u8 *mac)
{
@@ -4458,6 +4473,35 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
priv->rx_buf_aggregation = 1;
}
+static const struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+{
+ if (rate <= DESC_RATE_54M)
+ return;
+
+ if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
+ if (rate < DESC_RATE_MCS8)
+ *nss = 1;
+ else
+ *nss = 2;
+ *mcs = rate - DESC_RATE_MCS0;
+ }
+}
+
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
{
struct ieee80211_hw *hw = priv->hw;
@@ -4489,21 +4533,21 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
u16 network_type = WIRELESS_MODE_UNKNOWN;
if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) {
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
network_type = WIRELESS_MODE_AC;
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
network_type = WIRELESS_MODE_N_5G;
network_type |= WIRELESS_MODE_A;
} else {
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
network_type = WIRELESS_MODE_AC;
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
network_type = WIRELESS_MODE_N_24G;
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
network_type |= WIRELESS_MODE_B;
- else if (sta->supp_rates[0] & 0xf)
+ else if (sta->deflink.supp_rates[0] & 0xf)
network_type |= (WIRELESS_MODE_B | WIRELESS_MODE_G);
else
network_type |= WIRELESS_MODE_G;
@@ -4519,9 +4563,12 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
struct ieee80211_sta *sta;
+ struct rtl8xxxu_ra_report *rarpt;
u32 val32;
u8 val8;
+ rarpt = &priv->ra_report;
+
if (changed & BSS_CHANGED_ASSOC) {
dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
@@ -4530,6 +4577,10 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (bss_conf->assoc) {
u32 ramask;
int sgi = 0;
+ u8 highest_rate;
+ u8 mcs = 0, nss = 0;
+ u32 bit_rate;
+
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -4540,20 +4591,43 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto error;
}
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
dev_info(dev, "%s: HT supported\n", __func__);
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
dev_info(dev, "%s: VHT supported\n", __func__);
/* TODO: Set bits 28-31 for rate adaptive id */
- ramask = (sta->supp_rates[0] & 0xfff) |
- sta->ht_cap.mcs.rx_mask[0] << 12 |
- sta->ht_cap.mcs.rx_mask[1] << 20;
- if (sta->ht_cap.cap &
+ ramask = (sta->deflink.supp_rates[0] & 0xfff) |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12 |
+ sta->deflink.ht_cap.mcs.rx_mask[1] << 20;
+ if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
rcu_read_unlock();
+ highest_rate = fls(ramask) - 1;
+ if (highest_rate < DESC_RATE_MCS0) {
+ rarpt->txrate.legacy =
+ rtl8xxxu_legacy_ratetable[highest_rate].bitrate;
+ } else {
+ rtl8xxxu_desc_to_mcsrate(highest_rate,
+ &mcs, &nss);
+ rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS;
+
+ rarpt->txrate.mcs = mcs;
+ rarpt->txrate.nss = nss;
+
+ if (sgi) {
+ rarpt->txrate.flags |=
+ RATE_INFO_FLAGS_SHORT_GI;
+ }
+
+ rarpt->txrate.bw |= RATE_INFO_BW_20;
+ }
+ bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ rarpt->bit_rate = bit_rate;
+ rarpt->desc_rate = highest_rate;
+
priv->vif = vif;
priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
@@ -5021,12 +5095,12 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
ampdu_enable = false;
if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
u32 ampdu, val32;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- ampdu = (u32)sta->ht_cap.ampdu_density;
+ ampdu = (u32)sta->deflink.ht_cap.ampdu_density;
val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
tx_desc->txdw2 |= cpu_to_le32(val32);
@@ -5041,7 +5115,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
(ieee80211_is_data_qos(hdr->frame_control) &&
- sta && sta->ht_cap.cap &
+ sta && sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)))
sgi = true;
@@ -5404,35 +5478,6 @@ void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
}
}
-static struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = {
- {.bitrate = 10, .hw_value = 0x00,},
- {.bitrate = 20, .hw_value = 0x01,},
- {.bitrate = 55, .hw_value = 0x02,},
- {.bitrate = 110, .hw_value = 0x03,},
- {.bitrate = 60, .hw_value = 0x04,},
- {.bitrate = 90, .hw_value = 0x05,},
- {.bitrate = 120, .hw_value = 0x06,},
- {.bitrate = 180, .hw_value = 0x07,},
- {.bitrate = 240, .hw_value = 0x08,},
- {.bitrate = 360, .hw_value = 0x09,},
- {.bitrate = 480, .hw_value = 0x0a,},
- {.bitrate = 540, .hw_value = 0x0b,},
-};
-
-static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
-{
- if (rate <= DESC_RATE_54M)
- return;
-
- if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
- if (rate < DESC_RATE_MCS8)
- *nss = 1;
- else
- *nss = 2;
- *mcs = rate - DESC_RATE_MCS0;
- }
-}
-
static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv;
@@ -6117,8 +6162,8 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
switch (action) {
case IEEE80211_AMPDU_TX_START:
dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
- ampdu_factor = sta->ht_cap.ampdu_factor;
- ampdu_density = sta->ht_cap.ampdu_density;
+ ampdu_factor = sta->deflink.ht_cap.ampdu_factor;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
rtl8xxxu_set_ampdu_min_space(priv, ampdu_density);
dev_dbg(dev,
@@ -6210,10 +6255,10 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
u32 rate_bitmap = 0;
rcu_read_lock();
- rate_bitmap = (sta->supp_rates[0] & 0xfff) |
- (sta->ht_cap.mcs.rx_mask[0] << 12) |
- (sta->ht_cap.mcs.rx_mask[1] << 20);
- if (sta->ht_cap.cap &
+ rate_bitmap = (sta->deflink.supp_rates[0] & 0xfff) |
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << 12) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << 20);
+ if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
rcu_read_unlock();
@@ -6472,6 +6517,7 @@ static const struct ieee80211_ops rtl8xxxu_ops = {
.set_key = rtl8xxxu_set_key,
.ampdu_action = rtl8xxxu_ampdu_action,
.sta_statistics = rtl8xxxu_sta_statistics,
+ .get_antenna = rtl8xxxu_get_antenna,
};
static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
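The rtl8xxxu association hunk above fills priv->ra_report from the rate-adaptive mask: fls(ramask) - 1 yields the index of the highest enabled rate, legacy indices map straight into the bitrate table, HT indices are split into NSS/MCS by rtl8xxxu_desc_to_mcsrate(), and cfg80211_calculate_bitrate() converts the result into a bitrate. A stripped-down sketch of that derivation is shown below; the DEMO_* constants and table are hypothetical stand-ins for the driver's DESC_RATE_* values.

#include <linux/bitops.h>
#include <net/cfg80211.h>

/* Hypothetical rate indices mirroring the DESC_RATE_* layout: 0..11 are
 * legacy CCK/OFDM rates, 12..27 are HT MCS0..MCS15.
 */
#define DEMO_RATE_MCS0	12
#define DEMO_RATE_MCS8	20

static const u16 demo_legacy_bitrate[12] = {
	10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540,
};

/* Return the bitrate (in 100 kbit/s units) of the highest rate in 'ramask'. */
static u32 demo_mask_to_bitrate(u32 ramask, bool sgi)
{
	struct rate_info ri = {};
	u8 highest;

	if (!ramask)
		return 0;

	highest = fls(ramask) - 1;	/* index of the highest enabled rate */
	if (highest < DEMO_RATE_MCS0) {
		ri.legacy = demo_legacy_bitrate[highest];
	} else {
		ri.flags = RATE_INFO_FLAGS_MCS;
		ri.bw = RATE_INFO_BW_20;
		ri.nss = (highest < DEMO_RATE_MCS8) ? 1 : 2;
		ri.mcs = highest - DEMO_RATE_MCS0;
		if (sgi)
			ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
	}

	return cfg80211_calculate_bitrate(&ri);
}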
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ffd150ec181f..9e7e98b55eff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -629,11 +629,12 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
if (sta == NULL)
return;
- sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
- sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
- sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
+ sgi_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ sgi_20 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+ sgi_80 = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
- if ((!sta->ht_cap.ht_supported) && (!sta->vht_cap.vht_supported))
+ if (!sta->deflink.ht_cap.ht_supported &&
+ !sta->deflink.vht_cap.vht_supported)
return;
if (!sgi_40 && !sgi_20)
@@ -645,8 +646,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- bw_80 = sta->vht_cap.vht_supported;
+ bw_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_80 = sta->deflink.vht_cap.vht_supported;
}
if (bw_80) {
@@ -864,11 +865,11 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- if (!(sta->ht_cap.ht_supported) ||
- !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ if (!(sta->deflink.ht_cap.ht_supported) ||
+ !(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
return;
} else if (mac->opmode == NL80211_IFTYPE_STATION) {
- if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
+ if (!mac->bw_40 || !(sta->deflink.ht_cap.ht_supported))
return;
}
if (tcb_desc->multicast || tcb_desc->broadcast)
@@ -884,11 +885,11 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- if (!(sta->vht_cap.vht_supported))
+ if (!(sta->deflink.vht_cap.vht_supported))
return;
} else if (mac->opmode == NL80211_IFTYPE_STATION) {
if (!mac->bw_80 ||
- !(sta->vht_cap.vht_supported))
+ !(sta->deflink.vht_cap.vht_supported))
return;
}
if (tcb_desc->hw_rate <=
@@ -904,7 +905,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u8 hw_rate;
- u16 tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
+ u16 tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
if ((get_rf_type(rtlphy) == RF_2T2R) &&
(tx_mcs_map & 0x000c) != 0x000c) {
@@ -944,7 +945,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
u8 hw_rate;
if (get_rf_type(rtlphy) == RF_2T2R &&
- sta->ht_cap.mcs.rx_mask[1] != 0)
+ sta->deflink.ht_cap.mcs.rx_mask[1] != 0)
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
else
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1271,11 +1272,11 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
*and N rate will all be controlled by FW
*when tcb_desc->use_driver_rate = false
*/
- if (sta && sta->vht_cap.vht_supported) {
+ if (sta && sta->deflink.vht_cap.vht_supported) {
tcb_desc->hw_rate =
_rtl_get_vht_highest_n_rate(hw, sta);
} else {
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta && sta->deflink.ht_cap.ht_supported) {
tcb_desc->hw_rate =
_rtl_get_highest_n_rate(hw, sta);
} else {
@@ -1994,8 +1995,7 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
unsigned long flags;
- struct rtl_bssid_entry *entry;
- bool entry_found = false;
+ struct rtl_bssid_entry *entry = NULL, *iter;
/* check if it is scanning */
if (!mac->act_scanning)
@@ -2008,10 +2008,10 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
spin_lock_irqsave(&rtlpriv->locks.scan_list_lock, flags);
- list_for_each_entry(entry, &rtlpriv->scan_list.list, list) {
- if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
- list_del_init(&entry->list);
- entry_found = true;
+ list_for_each_entry(iter, &rtlpriv->scan_list.list, list) {
+ if (memcmp(iter->bssid, hdr->addr3, ETH_ALEN) == 0) {
+ list_del_init(&iter->list);
+ entry = iter;
rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
"Update BSSID=%pM to scan list (total=%d)\n",
hdr->addr3, rtlpriv->scan_list.num);
@@ -2019,7 +2019,7 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
}
}
- if (!entry_found) {
+ if (!entry) {
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
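Both rtl_collect_scan_list() above and rtl_pci_check_buddy_priv() further down drop a separate found flag in favour of a result pointer: the list_for_each_entry() cursor is only a valid entry while the loop body executes, so a match is copied into a pointer that stays NULL when nothing was found. A minimal sketch of the pattern with hypothetical types is shown below.

#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_entry {
	struct list_head list;
	u8 id[6];
};

/* Return the matching entry, or NULL if the list holds no match. */
static struct demo_entry *demo_find(struct list_head *head, const u8 *id)
{
	struct demo_entry *entry = NULL, *iter;

	list_for_each_entry(iter, head, list) {
		if (memcmp(iter->id, id, sizeof(iter->id)) == 0) {
			entry = iter;	/* remember the match ... */
			break;		/* ... and stop walking */
		}
	}

	/* Only 'entry' may be dereferenced here; when the loop ran to
	 * completion without a break, 'iter' does not point at a real entry.
	 */
	return entry;
}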
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index a18dffc8753a..67d0b9aee064 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -1600,18 +1600,10 @@ static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
coex_dm->auto_tdma_adjust = false;
}
} else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) {
- /* HID+A2DP */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->auto_tdma_adjust = false;
- } else {
- /*for low BT RSSI*/
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->auto_tdma_adjust = false;
- }
+ /* HID+A2DP (no need to consider BT RSSI) */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->auto_tdma_adjust = false;
btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
} else if ((bt_link_info->pan_only) ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 8efe2f5e5b9f..99a1d91ced5a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -903,18 +903,18 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
sta_entry->wireless_mode = WIRELESS_MODE_G;
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
sta_entry->wireless_mode = WIRELESS_MODE_B;
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
if (vif->type == NL80211_IFTYPE_ADHOC)
sta_entry->wireless_mode = WIRELESS_MODE_G;
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
sta_entry->wireless_mode = WIRELESS_MODE_A;
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_N_5G;
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_AC_5G;
if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -922,7 +922,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
}
/*disable cck rate for p2p*/
if (mac->p2p)
- sta->supp_rates[0] &= 0xfffffff0;
+ sta->deflink.supp_rates[0] &= 0xfffffff0;
memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1126,7 +1126,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtl_dbg(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
"send PS STATIC frame\n");
if (rtlpriv->dm.supp_phymode_switch) {
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
IEEE80211_SMPS_STATIC);
}
@@ -1134,20 +1134,20 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlhal->current_bandtype == BAND_ON_5G) {
mac->mode = WIRELESS_MODE_A;
} else {
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
mac->mode = WIRELESS_MODE_B;
else
mac->mode = WIRELESS_MODE_G;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
if (rtlhal->current_bandtype == BAND_ON_2_4G)
mac->mode = WIRELESS_MODE_N_24G;
else
mac->mode = WIRELESS_MODE_N_5G;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
if (rtlhal->current_bandtype == BAND_ON_5G)
mac->mode = WIRELESS_MODE_AC_5G;
else
@@ -1256,14 +1256,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
if (sta) {
- if (sta->ht_cap.ampdu_density >
+ if (sta->deflink.ht_cap.ampdu_density >
mac->current_ampdu_density)
mac->current_ampdu_density =
- sta->ht_cap.ampdu_density;
- if (sta->ht_cap.ampdu_factor <
+ sta->deflink.ht_cap.ampdu_density;
+ if (sta->deflink.ht_cap.ampdu_factor <
mac->current_ampdu_factor)
mac->current_ampdu_factor =
- sta->ht_cap.ampdu_factor;
+ sta->deflink.ht_cap.ampdu_factor;
}
rcu_read_unlock();
@@ -1298,20 +1298,20 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlhal->current_bandtype == BAND_ON_5G) {
mac->mode = WIRELESS_MODE_A;
} else {
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
mac->mode = WIRELESS_MODE_B;
else
mac->mode = WIRELESS_MODE_G;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
if (rtlhal->current_bandtype == BAND_ON_2_4G)
mac->mode = WIRELESS_MODE_N_24G;
else
mac->mode = WIRELESS_MODE_N_5G;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
if (rtlhal->current_bandtype == BAND_ON_5G)
mac->mode = WIRELESS_MODE_AC_5G;
else
@@ -1327,7 +1327,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
sta_entry->wireless_mode = mac->mode;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
mac->ht_enable = true;
/*
@@ -1338,16 +1338,16 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
* */
}
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
mac->vht_enable = true;
if (changed & BSS_CHANGED_BASIC_RATES) {
/* for 5G must << RATE_6M_INDEX = 4,
* because 5G have no cck rate*/
if (rtlhal->current_bandtype == BAND_ON_5G)
- basic_rates = sta->supp_rates[1] << 4;
+ basic_rates = sta->deflink.supp_rates[1] << 4;
else
- basic_rates = sta->supp_rates[0];
+ basic_rates = sta->deflink.supp_rates[0];
mac->basic_rates = basic_rates;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index ad327bae754b..8e4c15654746 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -323,14 +323,13 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- bool find_buddy_priv = false;
- struct rtl_priv *tpriv;
+ struct rtl_priv *tpriv = NULL, *iter;
struct rtl_pci_priv *tpcipriv = NULL;
if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
- list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
+ list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
list) {
- tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
+ tpcipriv = (struct rtl_pci_priv *)iter->priv;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"pcipriv->ndis_adapter.funcnumber %x\n",
pcipriv->ndis_adapter.funcnumber);
@@ -344,19 +343,19 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
tpcipriv->ndis_adapter.devnumber &&
pcipriv->ndis_adapter.funcnumber !=
tpcipriv->ndis_adapter.funcnumber) {
- find_buddy_priv = true;
+ tpriv = iter;
break;
}
}
}
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", find_buddy_priv);
+ "find_buddy_priv %d\n", tpriv != NULL);
- if (find_buddy_priv)
+ if (tpriv)
*buddy_priv = tpriv;
- return find_buddy_priv;
+ return tpriv != NULL;
}
static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 4b5ea0ec9109..a164364109ba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -66,7 +66,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
else
return N_MODE_MCS15_RIX;
} else if (wireless_mode == WIRELESS_MODE_AC_24G) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) {
ieee80211_rate_set_vht(&rate,
AC_MODE_MCS8_RIX,
nss);
@@ -88,7 +88,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
else
return N_MODE_MCS15_RIX;
} else if (wireless_mode == WIRELESS_MODE_AC_5G) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) {
ieee80211_rate_set_vht(&rate,
AC_MODE_MCS8_RIX,
nss);
@@ -121,9 +121,9 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
if (sta) {
- sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
- sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
- sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
+ sgi_20 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+ sgi_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ sgi_80 = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
wireless_mode = sta_entry->wireless_mode;
}
@@ -135,10 +135,10 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (sta && (sta->ht_cap.cap &
+ if (sta && (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (sta && sta->vht_cap.vht_supported)
+ if (sta && sta->deflink.vht_cap.vht_supported)
rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
} else {
if (mac->bw_80)
@@ -149,11 +149,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sgi_20 || sgi_40 || sgi_80)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (sta && sta->ht_cap.ht_supported &&
+ if (sta && sta->deflink.ht_cap.ht_supported &&
(wireless_mode == WIRELESS_MODE_N_5G ||
wireless_mode == WIRELESS_MODE_N_24G))
rate->flags |= IEEE80211_TX_RC_MCS;
- if (sta && sta->vht_cap.vht_supported &&
+ if (sta && sta->deflink.vht_cap.vht_supported &&
(wireless_mode == WIRELESS_MODE_AC_5G ||
wireless_mode == WIRELESS_MODE_AC_24G ||
wireless_mode == WIRELESS_MODE_AC_ONLY))
@@ -229,7 +229,7 @@ static void rtl_tx_status(void *ppriv,
if (sta) {
/* Check if aggregation has to be enabled for this tid */
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if (sta->ht_cap.ht_supported &&
+ if (sta->deflink.ht_cap.ht_supported &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index bf686a916acb..58c2ab3d44be 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1975,21 +1975,21 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -2061,11 +2061,11 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool b_shortgi = false;
@@ -2083,13 +2083,13 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index c948dafa0c80..6e4741e9483f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -58,7 +58,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = cck_buf->cck_agc_rpt;
/* (1)Hardware does not provide RSSI for CCK
- * (2)PWDB, Average PWDB cacluated by
+ * (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
if (ppsc->rfpwr_state == ERFON)
@@ -187,7 +187,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -504,7 +504,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -591,7 +591,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index bb5a0c4aec93..b9c62640d2cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1765,22 +1765,22 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -1853,11 +1853,11 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap &
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -1874,13 +1874,13 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 4165175cf5c0..730c7e939bd2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -166,7 +166,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -379,7 +379,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
if (sta)
- bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -441,7 +441,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_pkt_size(pdesc, (u16)skb->len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index eaba66113328..a040c07791d1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -520,7 +520,7 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
* 2 out-ep. Remainder pages have assigned to High queue */
if (outepnum > 1 && txqremaininpage)
numhq += txqremaininpage;
- /* NOTE: This step done before writting REG_RQPN. */
+ /* NOTE: This step done before writing REG_RQPN. */
if (ischipn) {
if (queue_sel & TX_SELE_NQ)
numnq = txqpageunit;
@@ -539,7 +539,7 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ :
WMM_CHIP_A_PAGE_NUM_LPQ;
}
- /* NOTE: This step done before writting REG_RQPN. */
+ /* NOTE: This step done before writing REG_RQPN. */
if (ischipn) {
if (queue_sel & TX_SELE_NQ)
numnq = WMM_CHIP_B_PAGE_NUM_NPQ;
@@ -1918,21 +1918,21 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -2003,11 +2003,11 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = curtxbw_40mhz &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -2025,13 +2025,13 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 87f959d5d861..ae3c4f97637e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -540,7 +540,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
rcu_read_lock();
sta = ieee80211_find_sta(mac->vif, mac->bssid);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(txdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index f849291cc587..2aecb2583f75 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1802,18 +1802,18 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value = sta->deflink.supp_rates[0];
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_A:
ratr_value &= 0x00000FF0;
@@ -1880,10 +1880,10 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -1901,11 +1901,11 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap = sta->deflink.supp_rates[0];
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index c02813fba934..807b66c16e11 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -498,7 +498,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
@@ -586,7 +586,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 76189283104c..47d8999e31c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2256,11 +2256,11 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
- u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool b_shortgi = false;
@@ -2276,12 +2276,12 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC)
macid = sta->aid + 1;
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index eef7a041e80d..8043d819fb85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -55,7 +55,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a;
/* (1)Hardware does not provide RSSI for CCK
- * (2)PWDB, Average PWDB cacluated by
+ * (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
cck_highpwr = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2,
@@ -153,7 +153,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1)
@@ -665,7 +665,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -759,7 +759,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 91199262aaca..4ca299c9de77 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -2017,20 +2017,20 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate = 0;
u32 tmp_ratr_value = 0;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_value &= 0x0000000D;
@@ -2115,10 +2115,10 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index = 0;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -2139,13 +2139,13 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
band |= WIRELESS_11B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index e474b4ec17f3..a5853a170b58 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -342,7 +342,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index c98f2216734f..965d98b9b09f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1841,21 +1841,21 @@ static void rtl8723e_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -1928,11 +1928,11 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -1949,13 +1949,13 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index 340b3d68a54e..27fddbcade32 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -52,7 +52,7 @@ static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
if (ppsc->rfpwr_state == ERFON)
@@ -170,7 +170,7 @@ static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -376,7 +376,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
@@ -442,7 +442,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_pkt_size(pdesc, (u16)skb->len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 0748aedce2ad..189cc6437600 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -2315,11 +2315,11 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -2335,13 +2335,13 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC)
macid = sta->aid + 1;
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 5a7cd270575a..24ef7cc52e99 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -55,7 +55,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BIT(9));
@@ -126,7 +126,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1) &
@@ -429,7 +429,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -516,7 +516,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 33ffc24d3675..7e0f62d59fe1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3300,20 +3300,20 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -3484,12 +3484,12 @@ static bool _rtl8821ae_get_ra_shortgi(struct ieee80211_hw *hw, struct ieee80211_
u8 mac_id)
{
bool b_short_gi = false;
- u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
u8 b_curshortgi_80mhz = 0;
- b_curshortgi_80mhz = (sta->vht_cap.cap &
+ b_curshortgi_80mhz = (sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_SHORT_GI_80) ? 1 : 0;
if (mac_id == MAC_ID_STATIC_FOR_BROADCAST_MULTICAST)
@@ -3512,7 +3512,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
u32 ratr_bitmap;
u8 ratr_index;
enum wireless_mode wirelessmode = 0;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
bool b_shortgi = false;
u8 rate_mask[7];
@@ -3534,22 +3534,22 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
if (wirelessmode == WIRELESS_MODE_N_5G ||
wirelessmode == WIRELESS_MODE_AC_5G ||
wirelessmode == WIRELESS_MODE_A)
- ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
else
- ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
+ ratr_bitmap = sta->deflink.supp_rates[NL80211_BAND_2GHZ];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
if (wirelessmode == WIRELESS_MODE_N_24G
|| wirelessmode == WIRELESS_MODE_N_5G)
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
else if (wirelessmode == WIRELESS_MODE_AC_24G
|| wirelessmode == WIRELESS_MODE_AC_5G
|| wirelessmode == WIRELESS_MODE_AC_ONLY)
ratr_bitmap |= _rtl8821ae_rate_to_bitmap_2ssvht(
- sta->vht_cap.vht_mcs.rx_mcs_map) << 12;
+ sta->deflink.vht_cap.vht_mcs.rx_mcs_map) << 12;
b_shortgi = _rtl8821ae_get_ra_shortgi(hw, sta, macid);
rf_type = _rtl8821ae_get_ra_rftype(hw, wirelessmode, ratr_bitmap);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 9d6f8dcbf2d6..d7cb3319d885 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -86,7 +86,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = p_phystrpt->cfosho[0];
/* (1)Hardware does not provide RSSI for CCK
- * (2)PWDB, Average PWDB cacluated by
+ * (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
cck_highpwr = (u8)rtlphy->cck_high_power;
@@ -215,7 +215,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -761,7 +761,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index df750b3a35e9..e76841d3417b 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -55,7 +55,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
- vht_cap = &sta->vht_cap;
+ vht_cap = &sta->deflink.vht_cap;
if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
(vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index aa2aeb5fb2cc..e344e058f943 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -585,10 +585,10 @@ void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
- bool no_update = si->updated;
bool disable_pt = true;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
@@ -599,7 +599,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
- SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
+ SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask);
SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
@@ -608,7 +608,6 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
si->init_ra_lv = 0;
- si->updated = true;
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
@@ -650,7 +649,7 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
- if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si)
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
return;
if (!connect) {
@@ -660,6 +659,10 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
return;
}
+
+ if (!si)
+ return;
+
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
@@ -1048,6 +1051,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif;
struct sk_buff *skb_new;
struct cfg80211_ssid *ssid;
+ u16 tim_offset = 0;
if (rsvd_pkt->type == RSVD_DUMMY) {
skb_new = alloc_skb(1, GFP_KERNEL);
@@ -1066,7 +1070,8 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
switch (rsvd_pkt->type) {
case RSVD_BEACON:
- skb_new = ieee80211_beacon_get(hw, vif);
+ skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL);
+ rsvd_pkt->tim_offset = tim_offset;
break;
case RSVD_PS_POLL:
skb_new = ieee80211_pspoll_get(hw, vif);
@@ -2051,7 +2056,10 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct cfg80211_scan_info info = {
.aborted = aborted,
};
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw_hal *hal = &rtwdev->hal;
struct rtw_vif *rtwvif;
+ u8 chan = scan_info->op_chan;
if (!vif)
return;
@@ -2061,10 +2069,14 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtw_core_scan_complete(rtwdev, vif, true);
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ if (rtwvif->net_type == RTW_NET_MGD_LINKED) {
+ hal->current_channel = chan;
+ hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ }
ieee80211_wake_queues(rtwdev->hw);
ieee80211_scan_completed(rtwdev->hw, &info);
- rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
rtwdev->scan_info.scanning_vif = NULL;
@@ -2173,6 +2185,9 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
enum rtw_scan_notify_id id;
u8 chan, status;
+ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ return;
+
c2h = get_c2h_from_skb(skb);
chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
id = GET_CHAN_SWITCH_ID(c2h->payload);
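A minimal, self-contained sketch (plain C, not from the patch; names are illustrative) of the reordered guards in rtw_fw_beacon_filter_config() above: after this change the "disconnect" beacon-filter H2C no longer requires a station entry, only the enable path does.

#include <stdbool.h>

/* Mirrors the new early-return order: feature check first, then the
 * connect == false path (always sent), then the station check.
 */
static bool example_should_send_bcn_filter(bool fw_has_bcn_filter,
					    bool connect, bool have_sta)
{
	if (!fw_has_bcn_filter)
		return false;
	if (!connect)
		return true;	/* disable H2C goes out even without a sta */
	return have_sta;	/* enable H2C still needs station info */
}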
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index b59d2cbad5d7..734113fba184 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -172,6 +172,7 @@ struct rtw_rsvd_page {
struct sk_buff *skb;
enum rtw_rsvd_packet_type type;
u8 page;
+ u16 tim_offset;
bool add_txdesc;
struct cfg80211_ssid *ssid;
u16 probe_req_size;
@@ -791,7 +792,8 @@ void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data);
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
-void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev);
void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index d1678aed9d9c..caf2603da2d6 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -75,7 +75,7 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
- rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
+ rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
break;
case RTW_HCI_TYPE_USB:
break;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 5cdc54c9a9aa..30903c567cd9 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -402,8 +402,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
coex_stat->wl_beacon_interval = conf->beacon_int;
}
- if (changed & BSS_CHANGED_BEACON)
+ if (changed & BSS_CHANGED_BEACON) {
+ rtw_set_dtim_period(rtwdev, conf->dtim_period);
rtw_fw_download_rsvd_page(rtwdev);
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (conf->enable_beacon)
@@ -427,6 +429,18 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ mutex_lock(&rtwdev->mutex);
+ chip->ops->phy_calibration(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -474,6 +488,18 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
return 0;
}
+static int rtw_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_fw_download_rsvd_page(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -694,7 +720,7 @@ static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
}
si->use_cfg_mask = true;
- rtw_update_sta_info(br_data->rtwdev, si);
+ rtw_update_sta_info(br_data->rtwdev, si, true);
}
static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
@@ -850,6 +876,17 @@ static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw,
return 0;
}
+static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ if (changed & IEEE80211_RC_BW_CHANGED)
+ rtw_update_sta_info(rtwdev, si, true);
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -861,9 +898,11 @@ const struct ieee80211_ops rtw_ops = {
.change_interface = rtw_ops_change_interface,
.configure_filter = rtw_ops_configure_filter,
.bss_info_changed = rtw_ops_bss_info_changed,
+ .start_ap = rtw_ops_start_ap,
.conf_tx = rtw_ops_conf_tx,
.sta_add = rtw_ops_sta_add,
.sta_remove = rtw_ops_sta_remove,
+ .set_tim = rtw_ops_set_tim,
.set_key = rtw_ops_set_key,
.ampdu_action = rtw_ops_ampdu_action,
.can_aggregate_in_amsdu = rtw_ops_can_aggregate_in_amsdu,
@@ -879,6 +918,7 @@ const struct ieee80211_ops rtw_ops = {
.reconfig_complete = rtw_reconfig_complete,
.hw_scan = rtw_ops_hw_scan,
.cancel_hw_scan = rtw_ops_cancel_hw_scan,
+ .sta_rc_update = rtw_ops_sta_rc_update,
.set_sar_specs = rtw_ops_set_sar_specs,
#ifdef CONFIG_PM
.suspend = rtw_ops_suspend,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 8b9899e41b0b..14289f83feb5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -280,7 +280,8 @@ static void rtw_ips_work(struct work_struct *work)
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work);
mutex_lock(&rtwdev->mutex);
- rtw_enter_ips(rtwdev);
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)
+ rtw_enter_ips(rtwdev);
mutex_unlock(&rtwdev->mutex);
}
@@ -312,7 +313,7 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw_txq_init(rtwdev, sta->txq[i]);
- rtw_update_sta_info(rtwdev, si);
+ rtw_update_sta_info(rtwdev, si, true);
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
rtwdev->sta_cnt++;
@@ -663,6 +664,12 @@ void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel)
}
EXPORT_SYMBOL(rtw_set_rx_freq_band);
+void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period)
+{
+ rtw_write32_set(rtwdev, REG_TCR, BIT_TCR_UPDATE_TIMIE);
+ rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period - 1);
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
@@ -903,7 +910,7 @@ static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
static u64 get_vht_ra_mask(struct ieee80211_sta *sta)
{
u64 ra_mask = 0;
- u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
u8 vht_mcs_cap;
int i, nss;
@@ -1104,7 +1111,8 @@ static u64 rtw_rate_mask_cfg(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
return ra_mask;
}
-void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct ieee80211_sta *sta = si->sta;
@@ -1122,19 +1130,19 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
bool is_vht_enable = false;
bool is_support_sgi = false;
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
is_vht_enable = true;
ra_mask |= get_vht_ra_mask(sta);
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = VHT_STBC_EN;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = VHT_LDPC_EN;
- } else if (sta->ht_cap.ht_supported) {
- ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
- (sta->ht_cap.mcs.rx_mask[0] << 12);
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ } else if (sta->deflink.ht_cap.ht_supported) {
+ ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
ldpc_en = HT_LDPC_EN;
}
@@ -1142,12 +1150,12 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
if (hal->current_band_type == RTW_BAND_5G) {
- ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
ra_mask_bak = ra_mask;
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G;
wireless_set = WIRELESS_OFDM | WIRELESS_HT;
} else {
@@ -1155,19 +1163,19 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
}
dm_info->rrsr_val_init = RRSR_INIT_5G;
} else if (hal->current_band_type == RTW_BAND_2G) {
- ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
+ ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
ra_mask_bak = ra_mask;
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
WIRELESS_HT | WIRELESS_VHT;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT |
RA_MASK_OFDM_IN_HT_2G;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
WIRELESS_HT;
- } else if (sta->supp_rates[0] <= 0xf) {
+ } else if (sta->deflink.supp_rates[0] <= 0xf) {
wireless_set = WIRELESS_CCK;
} else {
ra_mask &= RA_MASK_OFDM_RATES | RA_MASK_CCK_IN_BG;
@@ -1180,28 +1188,28 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
wireless_set = 0;
}
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW_CHANNEL_WIDTH_80;
- is_support_sgi = sta->vht_cap.vht_supported &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ is_support_sgi = sta->deflink.vht_cap.vht_supported &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
break;
case IEEE80211_STA_RX_BW_40:
bw_mode = RTW_CHANNEL_WIDTH_40;
- is_support_sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ is_support_sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
break;
default:
bw_mode = RTW_CHANNEL_WIDTH_20;
- is_support_sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ is_support_sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
break;
}
- if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) {
+ if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) {
tx_num = 2;
rf_type = RF_2T2R;
- } else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) {
+ } else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) {
tx_num = 2;
rf_type = RF_2T2R;
}
@@ -1222,7 +1230,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
si->ra_mask = ra_mask;
si->rate_id = rate_id;
- rtw_fw_send_ra_info(rtwdev, si);
+ rtw_fw_send_ra_info(rtwdev, si, reset_ra_mask);
}
static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
@@ -1353,7 +1361,7 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
rtw_leave_lps(rtwdev);
- if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) {
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) {
ret = rtw_leave_ips(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to leave idle state\n");
@@ -1389,7 +1397,7 @@ void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
- if (rtwvif->net_type == RTW_NET_NO_LINK && hw_scan)
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
}
@@ -1453,6 +1461,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
ht_cap->ht_supported = true;
ht_cap->cap = 0;
@@ -1470,7 +1479,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
IEEE80211_HT_CAP_DSSSCCK40 |
IEEE80211_HT_CAP_SGI_40;
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ ht_cap->ampdu_density = chip->ampdu_density;
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
if (efuse->hw_cap.nss > 1) {
ht_cap->mcs.rx_mask[0] = 0xFF;
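For reference while reading the per-chip hunks further below (rtw8723d keeps IEEE80211_HT_MPDU_DENSITY_16, the newer chips switch to IEEE80211_HT_MPDU_DENSITY_2): these constants encode the minimum A-MPDU start spacing advertised in the HT capabilities. A small reminder of the mac80211 encoding, written as a standalone snippet; the enum name is illustrative, the numeric values come from include/linux/ieee80211.h.

/* Minimum MPDU start spacing encoding used by ht_cap->ampdu_density. */
enum example_mpdu_density {
	EXAMPLE_HT_MPDU_DENSITY_2  = 4,		/* 2 usec  */
	EXAMPLE_HT_MPDU_DENSITY_16 = 7,		/* 16 usec */
};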
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 17815af9dd4e..0baaf5a32e82 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -17,7 +17,6 @@
#include "util.h"
-#define RTW_NAPI_WEIGHT_NUM 64
#define RTW_MAX_MAC_ID_NUM 32
#define RTW_MAX_SEC_CAM_NUM 32
#define MAX_PG_CAM_BACKUP_NUM 8
@@ -580,6 +579,7 @@ struct rtw_tx_pkt_info {
u32 tx_pkt_size;
u8 offset;
u8 pkt_offset;
+ u8 tim_offset;
u8 mac_id;
u8 rate_id;
u8 rate;
@@ -753,7 +753,6 @@ struct rtw_sta_info {
u8 ldpc_en:2;
bool sgi_enable;
bool vht_enable;
- bool updated;
u8 init_ra_lv;
u64 ra_mask;
@@ -1179,6 +1178,7 @@ struct rtw_chip_info {
bool rx_ldpc;
bool tx_stbc;
u8 max_power_index;
+ u8 ampdu_density;
u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
const struct rtw_fwcd_segs *fwcd_segs;
@@ -2132,6 +2132,7 @@ static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
}
void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel);
+void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period);
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
@@ -2145,7 +2146,8 @@ void rtw_chip_prepare_tx(struct rtw_dev *rtwdev);
void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
void rtw_tx_report_purge_timer(struct timer_list *t);
-void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask);
void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
const u8 *mac_addr, bool hw_scan);
void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a0991d3f15c0..24d5695363d3 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -689,6 +689,9 @@ static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
queue = RTW_TX_QUEUE_BCN;
else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
queue = RTW_TX_QUEUE_MGMT;
+ else if (is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1))
+ queue = RTW_TX_QUEUE_HI0;
else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
queue = ac_to_hwq[IEEE80211_AC_BE];
else
@@ -1479,12 +1482,15 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_chip_info *chip = rtwdev->chip;
+ struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
u16 value;
u16 offset;
int i;
+ int ret;
cut = BIT(0) << rtwdev->hal.cut_version;
@@ -1517,6 +1523,15 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
}
rtw_pci_link_cfg(rtwdev);
+
+ /* Disable 8821ce completion timeout by default */
+ if (chip->id == RTW_CHIP_TYPE_8821C) {
+ ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
+ if (ret)
+ rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
+ ret);
+ }
}
static int __maybe_unused rtw_pci_suspend(struct device *dev)
@@ -1703,7 +1718,7 @@ static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
init_dummy_netdev(&rtwpci->netdev);
netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
- RTW_NAPI_WEIGHT_NUM);
+ NAPI_POLL_WEIGHT);
}
static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
@@ -1770,7 +1785,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
}
/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
- if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL)
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
rtwpci->rx_no_aspm = true;
rtw_pci_phy_cfg(rtwdev);
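A standalone sketch (plain C, illustrative names) of the queue-selection change in rtw_hw_queue_mapping() above: broadcast/multicast data frames are now steered to the high queue, presumably so they can be held and released around the DTIM beacon once AP mode is in use, instead of leaving on a normal AC queue.

#include <stdbool.h>

enum example_tx_queue { EXAMPLE_Q_BCN, EXAMPLE_Q_MGMT, EXAMPLE_Q_HI0,
			EXAMPLE_Q_AC };

static enum example_tx_queue example_map_queue(bool is_beacon,
					       bool is_mgmt_or_ctl,
					       bool is_bmcast)
{
	if (is_beacon)
		return EXAMPLE_Q_BCN;
	if (is_mgmt_or_ctl)
		return EXAMPLE_Q_MGMT;
	if (is_bmcast)
		return EXAMPLE_Q_HI0;	/* new case added by this patch */
	return EXAMPLE_Q_AC;
}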
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index e505d17f107e..8982e0c98dac 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -536,7 +536,7 @@ static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
struct rtw_dev *rtwdev = data;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
- rtw_update_sta_info(rtwdev, si);
+ rtw_update_sta_info(rtwdev, si, false);
}
static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 84ba9ec489c3..03bd8dc53f72 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -389,12 +389,14 @@
#define BIT_EN_FREE_CNT BIT(3)
#define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1))
#define REG_HIQ_NO_LMT_EN 0x5A7
+#define REG_DTIM_COUNTER_ROOT 0x5A8
#define BIT_HIQ_NO_LMT_EN_ROOT BIT(0)
#define REG_TIMER0_SRC_SEL 0x05B4
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
#define REG_TCR 0x0604
#define BIT_PWRMGT_HWDATA_EN BIT(7)
+#define BIT_TCR_UPDATE_TIMIE BIT(5)
#define REG_RCR 0x0608
#define BIT_APP_FCS BIT(31)
#define BIT_APP_MIC BIT(30)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index ad2b323a0423..93cce44df531 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2747,6 +2747,7 @@ struct rtw_chip_info rtw8723d_hw_spec = {
.rx_ldpc = false,
.pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl,
.iqk_threshold = 8,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.coex_para_ver = 0x2007022f,
.bt_desired_ver = 0x2f,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 99eee128ae94..ffee39ea5df6 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -512,6 +512,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s8 rx_power;
u8 lna_idx = 0;
u8 vga_idx = 0;
@@ -523,6 +524,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = rx_power;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = rx_power;
}
@@ -530,6 +532,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
@@ -549,6 +552,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
pkt_stat->bw = bw;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
@@ -1919,6 +1923,7 @@ struct rtw_chip_info rtw8821c_hw_spec = {
.iqk_threshold = 8,
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
.coex_para_ver = 0x19092746,
.bt_desired_ver = 0x46,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
index 8e8915c5c498..6c82c4383497 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
@@ -13,7 +13,7 @@ static const u32 rtw8821c_mac[] = {
0x04F, 0x00000001,
0x029, 0x000000F9,
0x420, 0x00000080,
- 0x421, 0x0000000F,
+ 0x421, 0x0000001F,
0x428, 0x0000000A,
0x429, 0x00000010,
0x430, 0x00000000,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
index f34de115e4bc..56d22f9de904 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
@@ -9,6 +9,10 @@
static const struct pci_device_id rtw_8821ce_id_table[] = {
{
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB821),
+ .driver_data = (kernel_ulong_t)&rtw8821c_hw_spec
+ },
+ {
PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC821),
.driver_data = (kernel_ulong_t)&rtw8821c_hw_spec
},
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index eee7bf035403..dccd722b8e62 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2548,6 +2548,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.edcca_th = rtw8822b_edcca_th,
.l2h_th_ini_cs = 10 + EDCCA_IGI_BASE,
.l2h_th_ini_ad = -14 + EDCCA_IGI_BASE,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
.coex_para_ver = 0x20070206,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index cd74607a61a2..c043b5c520b9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -5368,6 +5368,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.edcca_th = rtw8822c_edcca_th,
.l2h_th_ini_cs = 60,
.l2h_th_ini_ad = 45,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
#ifdef CONFIG_PM
.wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index d2d607e22198..84aedabdf285 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -158,7 +158,8 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
memset(rx_status, 0, sizeof(*rx_status));
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
- if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD) &&
+ test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status);
if (pkt_stat->crc_err)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 94d1089f4022..60d40a5c2c6a 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -67,12 +67,16 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr);
SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null);
+ if (pkt_info->tim_offset) {
+ SET_TX_DESC_TIM_EN(txdesc, 1);
+ SET_TX_DESC_TIM_OFFSET(txdesc, pkt_info->tim_offset);
+ }
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
{
- u8 exp = sta->ht_cap.ampdu_factor;
+ u8 exp = sta->deflink.ht_cap.ampdu_factor;
/* the least ampdu factor is 8K, and the value in the tx desc is the
* max aggregation num, which represents val * 2 packets can be
@@ -83,7 +87,7 @@ static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
{
- return sta->ht_cap.ampdu_density;
+ return sta->deflink.ht_cap.ampdu_density;
}
static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
@@ -91,7 +95,7 @@ static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
{
u8 rate;
- if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
+ if (rtwdev->hal.rf_type == RF_2T2R && sta->deflink.ht_cap.mcs.rx_mask[1] != 0)
rate = DESC_RATEMCS15;
else
rate = DESC_RATEMCS7;
@@ -106,7 +110,7 @@ static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
u8 rate;
u16 tx_mcs_map;
- tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
+ tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
if (efuse->hw_cap.nss == 1) {
switch (tx_mcs_map & 0x3) {
case IEEE80211_VHT_MCS_SUPPORT_0_7:
@@ -340,11 +344,11 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
if (info->control.use_rts || skb->len > hw->wiphy->rts_threshold)
pkt_info->rts = true;
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
rate = get_highest_vht_tx_rate(rtwdev, sta);
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
rate = get_highest_ht_tx_rate(rtwdev, sta);
- else if (sta->supp_rates[0] <= 0xf)
+ else if (sta->deflink.supp_rates[0] <= 0xf)
rate = DESC_RATE11M;
else
rate = DESC_RATE54M;
@@ -448,6 +452,19 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
if (type == RSVD_QOS_NULL)
pkt_info->bt_null = true;
+ if (type == RSVD_BEACON) {
+ struct rtw_rsvd_page *rsvd_pkt;
+ int hdr_len;
+
+ rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
+ struct rtw_rsvd_page,
+ build_list);
+ if (rsvd_pkt && rsvd_pkt->tim_offset != 0) {
+ hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ pkt_info->tim_offset = rsvd_pkt->tim_offset - hdr_len;
+ }
+ }
+
rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
/* TODO: need to change hw port and hw ssn sel for multiple vifs */
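To tie the TIM pieces together: the reserved-page code above records the offset returned by ieee80211_beacon_get_tim(), rebases it past the 24-byte 3-address header, and the tx.h hunk below adds the descriptor fields. Below is a self-contained plain-C sketch of the resulting packing; it uses raw shifts instead of le32p_replace_bits(), ignores endianness, and the helper name is illustrative.

#include <stdint.h>

#define EXAMPLE_HDR_3ADDR_LEN	24	/* sizeof(struct ieee80211_hdr_3addr) */

/* tim_offset_in_frame: byte offset of the TIM element from the start of
 * the beacon frame, as reported by ieee80211_beacon_get_tim().
 */
static void example_fill_tim(uint32_t txdesc[12], uint16_t tim_offset_in_frame)
{
	uint32_t body_offset;

	if (!tim_offset_in_frame)
		return;				/* no TIM element */

	body_offset = tim_offset_in_frame - EXAMPLE_HDR_3ADDR_LEN;
	txdesc[9] |= 1u << 7;			/* TIM_EN, bit 7 of dword 9 */
	txdesc[9] &= ~0x7fu;
	txdesc[9] |= body_offset & 0x7f;	/* TIM_OFFSET, bits 6:0 */
}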
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 56371eff9f7f..8419603adce4 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -33,6 +33,10 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(6, 5))
#define SET_TX_DESC_SW_SEQ(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
+#define SET_TX_DESC_TIM_EN(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, BIT(7))
+#define SET_TX_DESC_TIM_OFFSET(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(6, 0))
#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
#define SET_TX_DESC_USE_RTS(tx_desc, value) \
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 305dbbebff6b..8a26adeb23fb 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -18,7 +18,7 @@ rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev,
u8 *cmd;
int i, j;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(cmd_len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len);
if (!skb)
return NULL;
@@ -244,6 +244,12 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx;
addr_cam->sec_entries[key_idx] = sec_cam;
set_bit(key_idx, addr_cam->sec_cam_map);
+ ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to update dctl cam sec entry: %d\n",
+ ret);
+ return ret;
+ }
ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n",
@@ -320,6 +326,7 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 hw_key_type;
bool ext_key = false;
int ret;
@@ -353,7 +360,8 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
return -EOPNOTSUPP;
}
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (!chip->hw_sec_hdr)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
ret = rtw89_cam_sec_key_install(rtwdev, vif, sta, key, hw_key_type,
ext_key);
@@ -396,6 +404,9 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
clear_bit(key_idx, addr_cam->sec_cam_map);
addr_cam->sec_entries[key_idx] = NULL;
if (inform_fw) {
+ ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ rtw89_err(rtwdev, "failed to update dctl cam del key: %d\n", ret);
ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret)
rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
@@ -421,10 +432,8 @@ static void rtw89_cam_reset_key_iter(struct ieee80211_hw *hw,
void *data)
{
struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtw89_cam_sec_key_del(rtwdev, vif, sta, key, false);
- rtw89_cam_deinit(rtwdev, rtwvif);
}
void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
@@ -480,6 +489,12 @@ int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
int i;
int ret;
+ if (unlikely(addr_cam->valid)) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "addr cam is already valid; skip init\n");
+ return 0;
+ }
+
ret = rtw89_cam_get_avail_addr_cam(rtwdev, &addr_cam_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get available addr cam\n");
@@ -531,6 +546,12 @@ static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
u8 bssid_cam_idx;
int ret;
+ if (unlikely(bssid_cam->valid)) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "bssid cam is already valid; skip init\n");
+ return 0;
+ }
+
ret = rtw89_cam_get_avail_bssid_cam(rtwdev, &bssid_cam_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get available bssid cam\n");
@@ -698,3 +719,31 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
FWCMD_SET_ADDR_SEC_ENT5(cmd, addr_cam->sec_ent[5]);
FWCMD_SET_ADDR_SEC_ENT6(cmd, addr_cam->sec_ent[6]);
}
+
+void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ u8 *cmd)
+{
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ SET_DCTL_MACID_V1(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
+ SET_DCTL_OPERATION_V1(cmd, 1);
+
+ SET_DCTL_SEC_ENT0_KEYID_V1(cmd, addr_cam->sec_ent_keyid[0]);
+ SET_DCTL_SEC_ENT1_KEYID_V1(cmd, addr_cam->sec_ent_keyid[1]);
+ SET_DCTL_SEC_ENT2_KEYID_V1(cmd, addr_cam->sec_ent_keyid[2]);
+ SET_DCTL_SEC_ENT3_KEYID_V1(cmd, addr_cam->sec_ent_keyid[3]);
+ SET_DCTL_SEC_ENT4_KEYID_V1(cmd, addr_cam->sec_ent_keyid[4]);
+ SET_DCTL_SEC_ENT5_KEYID_V1(cmd, addr_cam->sec_ent_keyid[5]);
+ SET_DCTL_SEC_ENT6_KEYID_V1(cmd, addr_cam->sec_ent_keyid[6]);
+
+ SET_DCTL_SEC_ENT_VALID_V1(cmd, addr_cam->sec_cam_map[0] & 0xff);
+ SET_DCTL_SEC_ENT0_V1(cmd, addr_cam->sec_ent[0]);
+ SET_DCTL_SEC_ENT1_V1(cmd, addr_cam->sec_ent[1]);
+ SET_DCTL_SEC_ENT2_V1(cmd, addr_cam->sec_ent[2]);
+ SET_DCTL_SEC_ENT3_V1(cmd, addr_cam->sec_ent[3]);
+ SET_DCTL_SEC_ENT4_V1(cmd, addr_cam->sec_ent[4]);
+ SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
+ SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 3a6a786530d1..a3931d3e40d2 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -355,6 +355,10 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif,
struct rtw89_sta *rtwsta,
const u8 *scan_mac_addr, u8 *cmd);
+void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ u8 *cmd);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif, u8 *cmd);
int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 684583955511..683854bba217 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -3068,7 +3068,17 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- if (rtwdev->dbcc_en) {
+ if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
+ BTC_RSN_NTFY_SCAN_START);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0,
+ BTC_RSN_NTFY_SCAN_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
+ } else if (rtwdev->dbcc_en) {
if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G &&
wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
_action_wl_5g(rtwdev);
@@ -4169,14 +4179,14 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], STA support HE=%d VHT=%d HT=%d\n",
- sta->he_cap.has_he,
- sta->vht_cap.vht_supported,
- sta->ht_cap.ht_supported);
- if (sta->he_cap.has_he)
+ sta->deflink.he_cap.has_he,
+ sta->deflink.vht_cap.vht_supported,
+ sta->deflink.ht_cap.ht_supported);
+ if (sta->deflink.he_cap.has_he)
mode |= BIT(BTC_WL_MODE_HE);
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
mode |= BIT(BTC_WL_MODE_VHT);
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
mode |= BIT(BTC_WL_MODE_HT);
r.mode = mode;
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index bcefc968576e..e3317deafa1d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -424,10 +424,10 @@ rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?
rtwsta->ampdu_params[tid].agg_num :
- 4 << sta->ht_cap.ampdu_factor) - 1);
+ 4 << sta->deflink.ht_cap.ampdu_factor) - 1);
desc_info->agg_en = true;
- desc_info->ampdu_density = sta->ht_cap.ampdu_density;
+ desc_info->ampdu_density = sta->deflink.ht_cap.ampdu_density;
desc_info->ampdu_num = ampdu_num;
}
@@ -435,6 +435,7 @@ static void
rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = tx_req->vif;
struct ieee80211_sta *sta = tx_req->sta;
struct ieee80211_tx_info *info;
@@ -446,6 +447,7 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
u8 sec_type = RTW89_SEC_KEY_TYPE_NONE;
+ u64 pn64;
if (!vif) {
rtw89_warn(rtwdev, "cannot set sec key without vif\n");
@@ -491,8 +493,21 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
}
desc_info->sec_en = true;
+ desc_info->sec_keyid = key->keyidx;
desc_info->sec_type = sec_type;
desc_info->sec_cam_idx = sec_cam->sec_cam_idx;
+
+ if (!chip->hw_sec_hdr)
+ return;
+
+ pn64 = atomic64_inc_return(&key->tx_pn);
+ desc_info->sec_seq[0] = pn64;
+ desc_info->sec_seq[1] = pn64 >> 8;
+ desc_info->sec_seq[2] = pn64 >> 16;
+ desc_info->sec_seq[3] = pn64 >> 24;
+ desc_info->sec_seq[4] = pn64 >> 32;
+ desc_info->sec_seq[5] = pn64 >> 40;
+ desc_info->wp_offset = 1; /* in unit of 8 bytes for security header */
}
static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
@@ -597,7 +612,7 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
if (pkt_type < PACKET_MAX)
return false;
- if (!sta || !sta->he_cap.has_he)
+ if (!sta || !sta->deflink.he_cap.has_he)
return false;
if (!ieee80211_is_data_qos(fc))
@@ -755,11 +770,22 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
return PACKET_MAX;
}
+static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
+ desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
+}
+
static void
rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
- if (!rtwdev->fw.tx_wake)
+ if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw))
return;
if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
@@ -806,6 +832,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
rtw89_core_tx_update_data_info(rtwdev, tx_req);
pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req);
rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type);
+ rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
break;
case RTW89_CORE_TX_TYPE_FWCMD:
rtw89_core_tx_update_h2c_info(rtwdev, tx_req);
@@ -829,6 +856,13 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
u32 cnt;
int ret;
+ if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "ignore h2c due to power is off with firmware state=%d\n",
+ test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags));
+ return 0;
+ }
+
tx_req.skb = skb;
tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD;
if (fwdl)
@@ -897,6 +931,27 @@ static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) |
+ FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) |
+ FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
+ FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
+ FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) |
+ FIELD_PREP(RTW89_TXWD_BODY1_SEC_KEYID, desc_info->sec_keyid) |
+ FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
@@ -916,6 +971,32 @@ static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_body4(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
+ FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body5(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) |
+ FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) |
+ FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) |
+ FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) |
+ FIELD_PREP(RTW89_TXWD_BODY7_DATA_RATE, desc_info->data_rate);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
@@ -926,6 +1007,13 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) |
@@ -946,6 +1034,15 @@ static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
+ FIELD_PREP(RTW89_TXWD_INFO2_FORCE_KEY_EN, desc_info->sec_en) |
+ FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
@@ -977,6 +1074,54 @@ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_core_fill_txdesc);
+void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ struct rtw89_txwd_body_v1 *txwd_body = (struct rtw89_txwd_body_v1 *)txdesc;
+ struct rtw89_txwd_info *txwd_info;
+
+ txwd_body->dword0 = rtw89_build_txwd_body0_v1(desc_info);
+ txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info);
+ txwd_body->dword2 = rtw89_build_txwd_body2(desc_info);
+ txwd_body->dword3 = rtw89_build_txwd_body3(desc_info);
+ if (desc_info->sec_en) {
+ txwd_body->dword4 = rtw89_build_txwd_body4(desc_info);
+ txwd_body->dword5 = rtw89_build_txwd_body5(desc_info);
+ }
+ txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info);
+
+ if (!desc_info->en_wd_info)
+ return;
+
+ txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1);
+ txwd_info->dword0 = rtw89_build_txwd_info0_v1(desc_info);
+ txwd_info->dword1 = rtw89_build_txwd_info1(desc_info);
+ txwd_info->dword2 = rtw89_build_txwd_info2_v1(desc_info);
+ txwd_info->dword4 = rtw89_build_txwd_info4(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1);
+
+static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
+ FIELD_PREP(AX_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ?
+ RTW89_CORE_RX_TYPE_FWDL :
+ RTW89_CORE_RX_TYPE_H2C);
+
+ return cpu_to_le32(dword);
+}
+
+void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ struct rtw89_rxdesc_short *txwd_v1 = (struct rtw89_rxdesc_short *)txdesc;
+
+ txwd_v1->dword0 = rtw89_build_txwd_fwcmd0_v1(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1);
+
static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
struct sk_buff *skb,
struct rtw89_rx_phy_ppdu *phy_ppdu)
@@ -1282,7 +1427,10 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
{
rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ /* In low power mode, RX is handled in thread context. */
+ local_bh_disable();
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
+ local_bh_enable();
rtwdev->napi_budget_countdown--;
}
@@ -1354,6 +1502,7 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
u8 *data, u32 data_offset)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_rxdesc_short *rxd_s;
struct rtw89_rxdesc_long *rxd_l;
u8 shift_len, drv_info_len;
@@ -1364,7 +1513,10 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
desc_info->long_rxdesc = RTW89_GET_RXWD_LONG_RXD(rxd_s);
desc_info->pkt_type = RTW89_GET_RXWD_RPKT_TYPE(rxd_s);
desc_info->mac_info_valid = RTW89_GET_RXWD_MAC_INFO_VALID(rxd_s);
- desc_info->bw = RTW89_GET_RXWD_BW(rxd_s);
+ if (chip->chip_id == RTL8852C)
+ desc_info->bw = RTW89_GET_RXWD_BW_V1(rxd_s);
+ else
+ desc_info->bw = RTW89_GET_RXWD_BW(rxd_s);
desc_info->data_rate = RTW89_GET_RXWD_DATA_RATE(rxd_s);
desc_info->gi_ltf = RTW89_GET_RXWD_GI_LTF(rxd_s);
desc_info->user_id = RTW89_GET_RXWD_USER_ID(rxd_s);
@@ -1454,7 +1606,8 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
- if (rtwdev->scanning && rtwdev->fw.scan_offload) {
+ if (rtwdev->scanning &&
+ RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
rx_status->freq =
ieee80211_channel_to_frequency(hal->current_channel,
hal->current_band_type);
@@ -1797,9 +1950,9 @@ static void rtw89_ips_work(struct work_struct *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
ips_work);
-
mutex_lock(&rtwdev->mutex);
- rtw89_enter_ips(rtwdev);
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)
+ rtw89_enter_ips(rtwdev);
mutex_unlock(&rtwdev->mutex);
}
@@ -2563,8 +2716,11 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
/* efuse process */
/* pre-config BB/RF, BB reset/RFC reset */
- rtw89_mac_disable_bb_rf(rtwdev);
- rtw89_mac_enable_bb_rf(rtwdev);
+ rtw89_chip_disable_bb_rf(rtwdev);
+ ret = rtw89_chip_enable_bb_rf(rtwdev);
+ if (ret)
+ return ret;
+
rtw89_phy_init_bb_reg(rtwdev);
rtw89_phy_init_rf_reg(rtwdev);
@@ -2702,7 +2858,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
rtwdev->scanning = true;
rtw89_leave_lps(rtwdev);
- if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
rtw89_leave_ips(rtwdev);
ether_addr_copy(rtwvif->mac_addr, mac_addr);
@@ -2726,7 +2882,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
rtwdev->scanning = false;
rtwdev->dig.bypass_dig = true;
- if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
}
@@ -2751,6 +2907,8 @@ static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
rtwdev->hal.support_cckpd =
!(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
!(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
+ rtwdev->hal.support_igi =
+ rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV;
}
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 771722132c53..2921814842ff 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -74,6 +74,16 @@ enum rtw89_subband {
RTW89_SUBBAND_NR,
};
+enum rtw89_gain_offset {
+ RTW89_GAIN_OFFSET_2G_CCK,
+ RTW89_GAIN_OFFSET_2G_OFDM,
+ RTW89_GAIN_OFFSET_5G_LOW,
+ RTW89_GAIN_OFFSET_5G_MID,
+ RTW89_GAIN_OFFSET_5G_HIGH,
+
+ RTW89_GAIN_OFFSET_NR,
+};
+
enum rtw89_hci_type {
RTW89_HCI_TYPE_PCIE,
RTW89_HCI_TYPE_USB,
@@ -117,6 +127,8 @@ enum rtw89_core_rx_type {
RTW89_CORE_RX_TYPE_C2H = 10,
RTW89_CORE_RX_TYPE_CSI = 11,
RTW89_CORE_RX_TYPE_CQI = 12,
+ RTW89_CORE_RX_TYPE_H2C = 13,
+ RTW89_CORE_RX_TYPE_FWDL = 14,
};
enum rtw89_txq_flags {
@@ -399,6 +411,7 @@ enum rtw89_rate_section {
RTW89_RS_OFFSET,
RTW89_RS_MAX,
RTW89_RS_LMT_NUM = RTW89_RS_MCS + 1,
+ RTW89_RS_TX_SHAPE_NUM = RTW89_RS_OFDM + 1,
};
enum rtw89_rate_max {
@@ -446,6 +459,7 @@ enum rtw89_regulation_type {
RTW89_UKRAINE = 11,
RTW89_CN = 12,
RTW89_QATAR = 13,
+ RTW89_UK = 14,
RTW89_REGD_NUM,
};
@@ -575,7 +589,7 @@ enum rtw89_ps_mode {
#define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1)
#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
-#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
+#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
@@ -639,6 +653,17 @@ struct rtw89_txwd_body {
__le32 dword5;
} __packed;
+struct rtw89_txwd_body_v1 {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+ __le32 dword4;
+ __le32 dword5;
+ __le32 dword6;
+ __le32 dword7;
+} __packed;
+
struct rtw89_txwd_info {
__le32 dword0;
__le32 dword1;
@@ -718,8 +743,11 @@ struct rtw89_tx_desc_info {
u8 ampdu_density;
u8 ampdu_num;
bool sec_en;
+ u8 addr_info_nr;
+ u8 sec_keyid;
u8 sec_type;
u8 sec_cam_idx;
+ u8 sec_seq[6];
u16 data_rate;
u16 data_retry_lowest_rate;
bool fw_dl;
@@ -2008,6 +2036,8 @@ struct rtw89_hci_ops {
void (*reset)(struct rtw89_dev *rtwdev);
int (*start)(struct rtw89_dev *rtwdev);
void (*stop)(struct rtw89_dev *rtwdev);
+ void (*pause)(struct rtw89_dev *rtwdev, bool pause);
+ void (*switch_mode)(struct rtw89_dev *rtwdev, bool low_power);
void (*recalc_int_mit)(struct rtw89_dev *rtwdev);
u8 (*read8)(struct rtw89_dev *rtwdev, u32 addr);
@@ -2025,6 +2055,13 @@ struct rtw89_hci_ops {
int (*mac_lv1_rcvy)(struct rtw89_dev *rtwdev, enum rtw89_lv1_rcvy_step step);
void (*dump_err_status)(struct rtw89_dev *rtwdev);
int (*napi_poll)(struct napi_struct *napi, int budget);
+
+ /* Each HCI instance handles its own locking inside the recovery_start
+ * and recovery_complete callbacks, and takes care of anything that needs
+ * special handling under SER, e.g. turning interrupts on/off except for
+ * the one used for halt notification.
+ */
+ void (*recovery_start)(struct rtw89_dev *rtwdev);
+ void (*recovery_complete)(struct rtw89_dev *rtwdev);
};
struct rtw89_hci_info {
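
For context, a backend that opts in to the new SER hooks would look roughly like the sketch below; only the two ops fields come from this patch, and the rtw89_example_* names are placeholders.

/* Illustrative sketch, not part of this patch: an HCI backend providing the
 * optional SER hooks. The core only reaches them through the
 * rtw89_hci_recovery_start()/rtw89_hci_recovery_complete() wrappers added
 * later in this header, and skips them when the pointers are NULL.
 */
static void rtw89_example_recovery_start(struct rtw89_dev *rtwdev)
{
	/* e.g. mask every interrupt source except halt notification */
}

static void rtw89_example_recovery_complete(struct rtw89_dev *rtwdev)
{
	/* e.g. restore the normal interrupt configuration */
}

static const struct rtw89_hci_ops rtw89_example_hci_ops = {
	/* ... mandatory callbacks omitted ... */
	.recovery_start		= rtw89_example_recovery_start,
	.recovery_complete	= rtw89_example_recovery_complete,
};
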
@@ -2032,9 +2069,12 @@ struct rtw89_hci_info {
enum rtw89_hci_type type;
u32 rpwm_addr;
u32 cpwm_addr;
+ bool paused;
};
struct rtw89_chip_ops {
+ int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
+ void (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -2064,16 +2104,26 @@ struct rtw89_chip_ops {
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en);
+ void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
s8 pw_ofst, enum rtw89_mac_idx mac_idx);
int (*pwr_on_func)(struct rtw89_dev *rtwdev);
int (*pwr_off_func)(struct rtw89_dev *rtwdev);
+ void (*fill_txdesc)(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
+ void (*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int (*stop_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
int (*resume_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+ int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
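
To show how the new per-chip descriptor hooks are meant to be consumed, here is a sketch of an ops table wiring them to the *_v1 helpers exported earlier in this series; the rtw8852c_chip_ops identifier is assumed for illustration only.

/* Illustrative sketch, not part of this patch: a chip generation that uses
 * the 8-dword TXWD body would dispatch rtw89_chip_fill_txdesc(),
 * rtw89_chip_fill_txdesc_fwcmd() and rtw89_chip_h2c_dctl_sec_cam() to the
 * v1 implementations through its rtw89_chip_ops.
 */
static const struct rtw89_chip_ops rtw8852c_chip_ops = {
	/* ... other callbacks ... */
	.fill_txdesc		= rtw89_core_fill_txdesc_v1,
	.fill_txdesc_fwcmd	= rtw89_core_fill_txdesc_fwcmd_v1,
	.h2c_dctl_sec_cam	= rtw89_fw_h2c_dctl_sec_cam_v1,
};
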
@@ -2261,6 +2311,51 @@ struct rtw89_page_regs {
u32 wp_page_info1;
};
+struct rtw89_imr_info {
+ u32 wdrls_imr_set;
+ u32 wsec_imr_reg;
+ u32 wsec_imr_set;
+ u32 mpdu_tx_imr_set;
+ u32 mpdu_rx_imr_set;
+ u32 sta_sch_imr_set;
+ u32 txpktctl_imr_b0_reg;
+ u32 txpktctl_imr_b0_clr;
+ u32 txpktctl_imr_b0_set;
+ u32 txpktctl_imr_b1_reg;
+ u32 txpktctl_imr_b1_clr;
+ u32 txpktctl_imr_b1_set;
+ u32 wde_imr_clr;
+ u32 wde_imr_set;
+ u32 ple_imr_clr;
+ u32 ple_imr_set;
+ u32 host_disp_imr_clr;
+ u32 host_disp_imr_set;
+ u32 cpu_disp_imr_clr;
+ u32 cpu_disp_imr_set;
+ u32 other_disp_imr_clr;
+ u32 other_disp_imr_set;
+ u32 bbrpt_chinfo_err_imr_reg;
+ u32 bbrpt_err_imr_set;
+ u32 bbrpt_dfs_err_imr_reg;
+ u32 ptcl_imr_clr;
+ u32 ptcl_imr_set;
+ u32 cdma_imr_0_reg;
+ u32 cdma_imr_0_clr;
+ u32 cdma_imr_0_set;
+ u32 cdma_imr_1_reg;
+ u32 cdma_imr_1_clr;
+ u32 cdma_imr_1_set;
+ u32 phy_intf_imr_reg;
+ u32 phy_intf_imr_clr;
+ u32 phy_intf_imr_set;
+ u32 rmac_imr_reg;
+ u32 rmac_imr_clr;
+ u32 rmac_imr_set;
+ u32 tmac_imr_reg;
+ u32 tmac_imr_clr;
+ u32 tmac_imr_set;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
@@ -2268,11 +2363,13 @@ struct rtw89_chip_info {
u32 fifo_size;
u16 max_amsdu_limit;
bool dis_2g_40m_ul_ofdma;
+ u32 rsvd_ple_ofst;
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
u32 rf_base_addr[2];
u8 support_bands;
bool support_bw160;
+ bool hw_sec_hdr;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -2292,10 +2389,12 @@ struct rtw89_chip_info {
const struct rtw89_pwr_cfg * const *pwr_on_seq;
const struct rtw89_pwr_cfg * const *pwr_off_seq;
const struct rtw89_phy_table *bb_table;
+ const struct rtw89_phy_table *bb_gain_table;
const struct rtw89_phy_table *rf_table[RF_PATH_MAX];
const struct rtw89_phy_table *nctl_table;
const struct rtw89_txpwr_table *byr_table;
const struct rtw89_phy_dig_gain_table *dig_table;
+ const struct rtw89_phy_tssi_dbw_table *tssi_dbw_table;
const s8 (*txpwr_lmt_2g)[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM];
@@ -2333,8 +2432,12 @@ struct rtw89_chip_info {
u8 rf_para_dlink_num;
const struct rtw89_btc_rf_trx_para *rf_para_dlink;
u8 ps_mode_supported;
+ u8 low_power_hci_modes;
+ u32 h2c_cctl_func_id;
u32 hci_func_en_addr;
+ u32 h2c_desc_size;
+ u32 txwd_body_size;
u32 h2c_ctrl_reg;
const u32 *h2c_regs;
u32 c2h_ctrl_reg;
@@ -2342,6 +2445,7 @@ struct rtw89_chip_info {
const struct rtw89_page_regs *page_regs;
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
+ const struct rtw89_imr_info *imr_info;
};
union rtw89_bus_info {
@@ -2388,6 +2492,13 @@ enum rtw89_fw_type {
RTW89_FW_WOWLAN = 3,
};
+enum rtw89_fw_feature {
+ RTW89_FW_FEATURE_OLD_HT_RA_FORMAT,
+ RTW89_FW_FEATURE_SCAN_OFFLOAD,
+ RTW89_FW_FEATURE_TX_WAKE,
+ RTW89_FW_FEATURE_CRASH_TRIGGER,
+};
+
struct rtw89_fw_suit {
const u8 *data;
u32 size;
@@ -2417,11 +2528,15 @@ struct rtw89_fw_info {
struct rtw89_fw_suit normal;
struct rtw89_fw_suit wowlan;
bool fw_log_enable;
- bool old_ht_ra_format;
- bool scan_offload;
- bool tx_wake;
+ u32 feature_map;
};
+#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
+ (!!((_fw)->feature_map & BIT(RTW89_FW_FEATURE_ ## _feat)))
+
+#define RTW89_SET_FW_FEATURE(_fw_feature, _fw) \
+ ((_fw)->feature_map |= BIT(_fw_feature))
+
struct rtw89_cam_info {
DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM);
DECLARE_BITMAP(bssid_cam_map, RTW89_MAX_BSSID_CAM_NUM);
@@ -2469,6 +2584,7 @@ struct rtw89_hal {
u8 tx_nss;
u8 rx_nss;
bool support_cckpd;
+ bool support_igi;
};
#define RTW89_MAX_MAC_ID_NUM 128
@@ -2484,6 +2600,7 @@ enum rtw89_flags {
RTW89_FLAG_LEISURE_PS,
RTW89_FLAG_LOW_POWER_MODE,
RTW89_FLAG_INACTIVE_PS,
+ RTW89_FLAG_RESTART_TRIGGER,
NUM_OF_RTW89_FLAGS,
};
@@ -2518,9 +2635,21 @@ struct rtw89_dack_info {
#define RTW89_IQK_CHS_NR 2
#define RTW89_IQK_PATH_NR 4
+
+struct rtw89_mcc_info {
+ u8 ch[RTW89_IQK_CHS_NR];
+ u8 band[RTW89_IQK_CHS_NR];
+ u8 table_idx;
+};
+
+struct rtw89_lck_info {
+ u8 thermal[RF_PATH_MAX];
+};
+
struct rtw89_iqk_info {
bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ bool lok_fail[RTW89_IQK_PATH_NR];
bool iqk_tx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
bool iqk_rx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
u32 iqk_fail_cnt;
@@ -2549,6 +2678,8 @@ struct rtw89_iqk_info {
u32 syn1to2;
u8 iqk_mcc_ch[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
u8 iqk_table_idx[RTW89_IQK_PATH_NR];
+ u32 lok_idac[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ u32 lok_vbuf[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
};
#define RTW89_DPK_RF_PATH 2
@@ -2559,6 +2690,7 @@ struct rtw89_dpk_bkup_para {
enum rtw89_bandwidth bw;
u8 ch;
bool path_ok;
+ u8 mdpd_en;
u8 txagc_dpk;
u8 ther_dpk;
u8 gs;
@@ -2568,11 +2700,12 @@ struct rtw89_dpk_bkup_para {
struct rtw89_dpk_info {
bool is_dpk_enable;
bool is_dpk_reload_en;
- u16 dc_i[RTW89_DPK_RF_PATH];
- u16 dc_q[RTW89_DPK_RF_PATH];
- u8 corr_val[RTW89_DPK_RF_PATH];
- u8 corr_idx[RTW89_DPK_RF_PATH];
+ u16 dc_i[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u16 dc_q[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u8 corr_val[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u8 corr_idx[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u8 cur_idx[RTW89_DPK_RF_PATH];
+ u8 cur_k_set;
struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
};
@@ -2581,6 +2714,7 @@ struct rtw89_fem_info {
bool elna_5g;
bool epa_2g;
bool epa_5g;
+ bool epa_6g;
};
struct rtw89_phy_ch_info {
@@ -2853,8 +2987,8 @@ struct rtw89_ser {
struct work_struct ser_hdl_work;
struct delayed_work ser_alarm_work;
- struct state_ent *st_tbl;
- struct event_ent *ev_tbl;
+ const struct state_ent *st_tbl;
+ const struct event_ent *ev_tbl;
struct list_head msg_q;
spinlock_t msg_q_lock; /* lock when read/write ser msg */
DECLARE_BITMAP(flags, RTW89_NUM_OF_SER_FLAGS);
@@ -2898,6 +3032,47 @@ struct rtw89_hw_scan_info {
u8 op_band;
};
+enum rtw89_phy_bb_gain_band {
+ RTW89_BB_GAIN_BAND_2G = 0,
+ RTW89_BB_GAIN_BAND_5G_L = 1,
+ RTW89_BB_GAIN_BAND_5G_M = 2,
+ RTW89_BB_GAIN_BAND_5G_H = 3,
+ RTW89_BB_GAIN_BAND_6G_L = 4,
+ RTW89_BB_GAIN_BAND_6G_M = 5,
+ RTW89_BB_GAIN_BAND_6G_H = 6,
+ RTW89_BB_GAIN_BAND_6G_UH = 7,
+
+ RTW89_BB_GAIN_BAND_NR,
+};
+
+enum rtw89_phy_bb_rxsc_num {
+ RTW89_BB_RXSC_NUM_40 = 9, /* SC: 0, 1~8 */
+ RTW89_BB_RXSC_NUM_80 = 13, /* SC: 0, 1~8, 9~12 */
+ RTW89_BB_RXSC_NUM_160 = 15, /* SC: 0, 1~8, 9~12, 13~14 */
+};
+
+struct rtw89_phy_bb_gain_info {
+ s8 lna_gain[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_gain[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][TIA_GAIN_NUM];
+ s8 lna_gain_bypass[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 lna_op1db[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_lna_op1db[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [LNA_GAIN_NUM + 1]; /* TIA0_LNA0~6 + TIA1_LNA6 */
+ s8 rpl_ofst_20[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX];
+ s8 rpl_ofst_40[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [RTW89_BB_RXSC_NUM_40];
+ s8 rpl_ofst_80[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [RTW89_BB_RXSC_NUM_80];
+ s8 rpl_ofst_160[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [RTW89_BB_RXSC_NUM_160];
+};
+
+struct rtw89_phy_efuse_gain {
+ bool offset_valid;
+ s8 offset[RF_PATH_MAX][RTW89_GAIN_OFFSET_NR]; /* S(8, 0) */
+ s8 offset_base[RTW89_PHY_MAX]; /* S(8, 4) */
+};
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
@@ -2948,6 +3123,8 @@ struct rtw89_dev {
struct rtw89_dack_info dack;
struct rtw89_iqk_info iqk;
struct rtw89_dpk_info dpk;
+ struct rtw89_mcc_info mcc;
+ struct rtw89_lck_info lck;
bool is_tssi_mode[RF_PATH_MAX];
bool is_bt_iqk_timeout;
@@ -2960,6 +3137,9 @@ struct rtw89_dev {
struct rtw89_env_monitor_info env_monitor;
struct rtw89_dig_info dig;
struct rtw89_phy_ch_info ch_info;
+ struct rtw89_phy_bb_gain_info bb_gain;
+ struct rtw89_phy_efuse_gain efuse_gain;
+
struct delayed_work track_work;
struct delayed_work coex_act1_work;
struct delayed_work coex_bt_devinfo_work;
@@ -3011,6 +3191,16 @@ static inline int rtw89_hci_deinit(struct rtw89_dev *rtwdev)
return rtwdev->hci.ops->deinit(rtwdev);
}
+static inline void rtw89_hci_pause(struct rtw89_dev *rtwdev, bool pause)
+{
+ rtwdev->hci.ops->pause(rtwdev, pause);
+}
+
+static inline void rtw89_hci_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
+{
+ rtwdev->hci.ops->switch_mode(rtwdev, low_power);
+}
+
static inline void rtw89_hci_recalc_int_mit(struct rtw89_dev *rtwdev)
{
rtwdev->hci.ops->recalc_int_mit(rtwdev);
@@ -3029,10 +3219,25 @@ static inline void rtw89_hci_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
bool drop)
{
+ if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
+ return;
+
if (rtwdev->hci.ops->flush_queues)
return rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
}
+static inline void rtw89_hci_recovery_start(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.ops->recovery_start)
+ rtwdev->hci.ops->recovery_start(rtwdev);
+}
+
+static inline void rtw89_hci_recovery_complete(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.ops->recovery_complete)
+ rtwdev->hci.ops->recovery_complete(rtwdev);
+}
+
static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
{
return rtwdev->hci.ops->read8(rtwdev, addr);
@@ -3440,6 +3645,14 @@ static inline void rtw89_chip_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev,
chip->ops->bb_ctrl_btc_preagc(rtwdev, bt_en);
}
+static inline void rtw89_chip_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->cfg_txrx_path)
+ chip->ops->cfg_txrx_path(rtwdev);
+}
+
static inline
void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif)
@@ -3474,6 +3687,26 @@ static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
}
static inline
+void rtw89_chip_fill_txdesc(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->fill_txdesc(rtwdev, desc_info, txdesc);
+}
+
+static inline
+void rtw89_chip_fill_txdesc_fwcmd(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
+}
+
+static inline
void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
{
@@ -3506,6 +3739,18 @@ int rtw89_chip_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
return chip->ops->resume_sch_tx(rtwdev, mac_idx, tx_en);
}
+static inline
+int rtw89_chip_h2c_dctl_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->h2c_dctl_sec_cam)
+ return 0;
+ return chip->ops->h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+}
+
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
__le16 fc = hdr->frame_control;
@@ -3520,10 +3765,12 @@ static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
static inline bool rtw89_sta_has_beamformer_cap(struct ieee80211_sta *sta)
{
- if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) ||
- (sta->he_cap.he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
- (sta->he_cap.he_cap_elem.phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER))
+ if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) ||
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[4] &
+ IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER))
return true;
return false;
}
@@ -3546,6 +3793,12 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
+void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
+void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index b73cc03cecfd..f93f3fee1505 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -724,26 +724,6 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
return count;
}
-static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = {
- [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR,
- [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
- [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
- [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
- [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR,
- [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR,
- [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
- [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
- [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR,
- [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR,
- [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
- [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
-};
-
static void rtw89_debug_dump_mac_mem(struct seq_file *m,
struct rtw89_dev *rtwdev,
u8 sel, u32 start_addr, u32 len)
@@ -757,7 +737,7 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m,
pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1;
start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
- base_addr = mac_mem_base_addr_table[sel];
+ base_addr = rtw89_mac_mem_base_addrs[sel];
base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
for (p = 0; p < pages; p++) {
@@ -2204,6 +2184,48 @@ out:
return count;
}
+static int
+rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+
+ seq_printf(m, "%d\n",
+ test_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags));
+ return 0;
+}
+
+static ssize_t
+rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ bool fw_crash;
+ int ret;
+
+ if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
+ return -EOPNOTSUPP;
+
+ ret = kstrtobool_from_user(user_buf, count, &fw_crash);
+ if (ret)
+ return -EINVAL;
+
+ if (!fw_crash)
+ return -EINVAL;
+
+ mutex_lock(&rtwdev->mutex);
+ set_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
+ ret = rtw89_fw_h2c_trigger_cpu_exception(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v)
{
struct rtw89_debugfs_priv *debugfs_priv = m->private;
@@ -2488,6 +2510,11 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = {
.cb_write = rtw89_debug_priv_early_h2c_set,
};
+static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = {
+ .cb_read = rtw89_debug_priv_fw_crash_get,
+ .cb_write = rtw89_debug_priv_fw_crash_set,
+};
+
static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = {
.cb_read = rtw89_debug_priv_btc_info_get,
};
@@ -2542,6 +2569,7 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_rw(mac_dbg_port_dump);
rtw89_debugfs_add_w(send_h2c);
rtw89_debugfs_add_rw(early_h2c);
+ rtw89_debugfs_add_rw(fw_crash);
rtw89_debugfs_add_r(btc_info);
rtw89_debugfs_add_w(btc_manual);
rtw89_debugfs_add_w(fw_log_manual);
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 6deaf8eec6b4..e4be785709d1 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -10,31 +10,33 @@
#include "phy.h"
#include "reg.h"
-static struct sk_buff *rtw89_fw_h2c_alloc_skb(u32 len, bool header)
+static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
+ bool header)
{
struct sk_buff *skb;
u32 header_len = 0;
+ u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
if (header)
header_len = H2C_HEADER_LEN;
- skb = dev_alloc_skb(len + header_len + 24);
+ skb = dev_alloc_skb(len + header_len + h2c_desc_size);
if (!skb)
return NULL;
- skb_reserve(skb, header_len + 24);
+ skb_reserve(skb, header_len + h2c_desc_size);
memset(skb->data, 0, len);
return skb;
}
-struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len)
+struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
- return rtw89_fw_h2c_alloc_skb(len, true);
+ return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}
-struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len)
+struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
- return rtw89_fw_h2c_alloc_skb(len, false);
+ return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}
static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
@@ -193,22 +195,56 @@ int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
return 0;
}
+#define __DEF_FW_FEAT_COND(__cond, __op) \
+static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
+{ \
+ return suit_ver_code __op comp_ver_code; \
+}
+
+__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
+__DEF_FW_FEAT_COND(le, <=); /* less or equal */
+
+struct __fw_feat_cfg {
+ enum rtw89_core_chip_id chip_id;
+ enum rtw89_fw_feature feature;
+ u32 ver_code;
+ bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
+};
+
+#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
+ { \
+ .chip_id = _chip, \
+ .feature = RTW89_FW_FEATURE_ ## _feat, \
+ .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
+ .cond = __fw_feat_cond_ ## _cond, \
+ }
+
+static const struct __fw_feat_cfg fw_feat_tbl[] = {
+ __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
+};
+
static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ const struct __fw_feat_cfg *ent;
+ const struct rtw89_fw_suit *fw_suit;
+ u32 suit_ver_code;
+ int i;
- if (chip->chip_id == RTL8852A &&
- RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0))
- rtwdev->fw.old_ht_ra_format = true;
+ fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
- if (chip->chip_id == RTL8852A &&
- RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
- rtwdev->fw.scan_offload = true;
+ for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
+ ent = &fw_feat_tbl[i];
+ if (chip->chip_id != ent->chip_id)
+ continue;
- if (chip->chip_id == RTL8852A &&
- RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
- rtwdev->fw.tx_wake = true;
+ if (ent->cond(suit_ver_code, ent->ver_code))
+ RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
+ }
}
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
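
With the table above, an RTL8852A running firmware 0.13.36.0 ends up with SCAN_OFFLOAD, TX_WAKE and CRASH_TRIGGER set in feature_map (and OLD_HT_RA_FORMAT clear). Callers then gate on the bitmap; the sketch below mirrors the fw_crash debugfs handler added in this series, wrapped in a hypothetical helper name.

/* Illustrative usage, not part of this patch: gate an optional firmware
 * command on a feature bit recognized from the firmware version.
 */
static int rtw89_example_trigger_fw_crash(struct rtw89_dev *rtwdev)
{
	/* refuse if this firmware does not advertise the crash trigger */
	if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
		return -EOPNOTSUPP;

	return rtw89_fw_h2c_trigger_cpu_exception(rtwdev);
}
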
@@ -275,7 +311,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
struct sk_buff *skb;
u32 ret = 0;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
return -ENOMEM;
@@ -341,7 +377,7 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
else
pkt_len = residue_len;
- skb = rtw89_fw_h2c_alloc_skb_no_hdr(pkt_len);
+ skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -536,7 +572,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
{
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -563,6 +599,41 @@ fail:
return -EBUSY;
}
+#define H2C_DCTL_SEC_CAM_LEN 68
+int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
+
+ rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
+ H2C_DCTL_SEC_CAM_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
+
#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
@@ -585,7 +656,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
return -ENOMEM;
@@ -631,7 +702,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LOG_CFG_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
return -ENOMEM;
@@ -667,7 +738,7 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
{
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_GENERAL_PKT_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -704,7 +775,7 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
{
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LPS_PARM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -744,13 +815,14 @@ fail:
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
struct sk_buff *skb;
u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
u8 macid = rtwvif->mac_id;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -758,16 +830,18 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
skb_put(skb, H2C_CMC_TBL_LEN);
SET_CTRL_INFO_MACID(skb->data, macid);
SET_CTRL_INFO_OPERATION(skb->data, 1);
- SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
- SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
- SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
- SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
- SET_CMC_TBL_ANTSEL_A(skb->data, 0);
- SET_CMC_TBL_ANTSEL_B(skb->data, 0);
- SET_CMC_TBL_ANTSEL_C(skb->data, 0);
- SET_CMC_TBL_ANTSEL_D(skb->data, 0);
+ if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
+ SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
+ SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
+ SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
+ SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_A(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_B(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_C(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_D(skb->data, 0);
+ }
SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
@@ -775,7 +849,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
- H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
if (rtw89_h2c_tx(rtwdev, skb, false)) {
@@ -795,26 +869,28 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
{
bool ppe_th;
u8 ppe16, ppe8;
- u8 nss = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
- u8 ppe_thres_hdr = sta->he_cap.ppe_thres[0];
+ u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
+ u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
u8 ru_bitmap;
u8 n, idx, sh;
u16 ppe;
int i;
- if (!sta->he_cap.has_he)
+ if (!sta->deflink.he_cap.has_he)
return;
ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
- sta->he_cap.he_cap_elem.phy_cap_info[6]);
+ sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
if (!ppe_th) {
u8 pad;
pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
- sta->he_cap.he_cap_elem.phy_cap_info[9]);
+ sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
for (i = 0; i < RTW89_PPE_BW_NUM; i++)
pads[i] = pad;
+
+ return;
}
ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
@@ -831,7 +907,7 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
sh = n & 7;
n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
- ppe = le16_to_cpu(*((__le16 *)&sta->he_cap.ppe_thres[idx]));
+ ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
@@ -849,6 +925,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
@@ -860,7 +937,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
if (sta)
__get_sta_he_pkt_padding(rtwdev, sta, pads);
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -881,17 +958,26 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
else
SET_CMC_TBL_ULDL(skb->data, 0);
SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
- SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
- SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
- SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
+ if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
+ SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
+ } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
+ SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
+ }
if (sta)
- SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
+ sta->deflink.he_cap.has_he);
if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
- H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
if (rtw89_h2c_tx(rtwdev, skb, false)) {
@@ -909,9 +995,10 @@ fail:
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -930,7 +1017,7 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
- H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
if (rtw89_h2c_tx(rtwdev, skb, false)) {
@@ -963,7 +1050,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
}
bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(bcn_total_len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
dev_kfree_skb_any(skb_beacon);
@@ -1017,7 +1104,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
self_role = rtwvif->self_role;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_ROLE_MAINTAIN_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
@@ -1059,7 +1146,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
@@ -1103,7 +1190,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
@@ -1136,7 +1223,7 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
{
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_EDCA_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
return -ENOMEM;
@@ -1171,7 +1258,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_OFLD_CFG_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
return -ENOMEM;
@@ -1201,7 +1288,7 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
struct sk_buff *skb;
u8 *cmd;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_RA_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
@@ -1272,7 +1359,7 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
struct sk_buff *skb;
u8 *cmd;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_INIT);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
return -ENOMEM;
@@ -1331,7 +1418,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
u8 *cmd;
int i;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_ROLE);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
return -ENOMEM;
@@ -1399,7 +1486,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
struct sk_buff *skb;
u8 *cmd;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_CTRL);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
return -ENOMEM;
@@ -1441,7 +1528,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
struct sk_buff *skb;
u8 *cmd;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_RFK);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
return -ENOMEM;
@@ -1481,7 +1568,7 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
struct sk_buff *skb;
u8 *cmd;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
return -ENOMEM;
@@ -1523,7 +1610,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
*id = alloc_id;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD + skb_ofld->len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
return -ENOMEM;
@@ -1562,7 +1649,7 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
u8 *cmd;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(skb_len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
return -ENOMEM;
@@ -1626,7 +1713,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
u8 *cmd;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_SCAN_OFFLOAD);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
return -ENOMEM;
@@ -1675,7 +1762,7 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
u8 class = info->rf_path == RF_PATH_A ?
H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
return -ENOMEM;
@@ -1698,13 +1785,52 @@ fail:
return -EBUSY;
}
+int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_fw_h2c_rf_get_mccch *mccch;
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, sizeof(*mccch));
+ mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
+
+ mccch->ch_0 = cpu_to_le32(mcc_info->ch[0]);
+ mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
+ mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
+ mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
+ mccch->current_channel = cpu_to_le32(rtwdev->hal.current_channel);
+ mccch->current_band_type = cpu_to_le32(rtwdev->hal.current_band_type);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
+ H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
+ sizeof(*mccch));
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
{
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
return -ENOMEM;
@@ -1731,7 +1857,7 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
struct sk_buff *skb;
- skb = rtw89_fw_h2c_alloc_skb_no_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
return -ENOMEM;
@@ -2065,7 +2191,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
ch_info->num_pkt = 0;
break;
case RTW89_CHAN_DFS:
- ch_info->period = min_t(u8, ch_info->period,
+ ch_info->period = max_t(u8, ch_info->period,
RTW89_DFS_CHAN_TIME);
ch_info->dwell_time = RTW89_DWELL_TIME;
break;
@@ -2254,3 +2380,38 @@ void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
scan_info->op_bw = hal->current_band_width;
scan_info->op_band = hal->current_band_type;
}
+
+#define H2C_FW_CPU_EXCEPTION_LEN 4
+#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
+int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for fw cpu exception\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
+ RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
+ H2C_FW_CPU_EXCEPTION_TYPE_DEF);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_TEST,
+ H2C_CL_FW_STATUS_TEST,
+ H2C_FUNC_CPU_EXCEPTION, 0, 0,
+ H2C_FW_CPU_EXCEPTION_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index ed8609b204e0..95a55c4213db 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -973,6 +973,36 @@ static inline void SET_CMC_TBL_ANTSEL_D(void *table, u32 val)
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D,
BIT(31));
}
+
+#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0)
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(1, 0));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(1, 0));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(3, 2));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(3, 2));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(5, 4));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(5, 4));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 6));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(7, 6));
+}
+
#define SET_CMC_TBL_MASK_ADDR_CAM_INDEX GENMASK(7, 0)
static inline void SET_CMC_TBL_ADDR_CAM_INDEX(void *table, u32 val)
{
@@ -1001,7 +1031,6 @@ static inline void SET_CMC_TBL_DOPPLER_CTRL(void *table, u32 val)
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL,
GENMASK(19, 18));
}
-#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0)
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20));
@@ -1106,13 +1135,14 @@ static inline void SET_CMC_TBL_CSI_GI_LTF(void *table, u32 val)
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF,
GENMASK(27, 25));
}
-#define SET_CMC_TBL_MASK_CSI_GID_SEL BIT(0)
-static inline void SET_CMC_TBL_CSI_GID_SEL(void *table, u32 val)
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160(void *table, u32 val)
{
- le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29));
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL,
- BIT(29));
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(29, 28));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(29, 28));
}
+
#define SET_CMC_TBL_MASK_CSI_BW GENMASK(1, 0)
static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
{
@@ -1121,6 +1151,308 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
+static inline void SET_DCTL_MACID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
+}
+
+static inline void SET_DCTL_OPERATION_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7));
+}
+
+#define SET_DCTL_MASK_QOS_FIELD_V1 GENMASK(7, 0)
+static inline void SET_DCTL_QOS_FIELD_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_FIELD_V1,
+ GENMASK(7, 0));
+}
+
+#define SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID GENMASK(6, 0)
+static inline void SET_DCTL_HW_EXSEQ_MACID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 8));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID,
+ GENMASK(14, 8));
+}
+
+#define SET_DCTL_MASK_QOS_DATA BIT(0)
+static inline void SET_DCTL_QOS_DATA_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_DATA,
+ BIT(15));
+}
+
+#define SET_DCTL_MASK_AES_IV_L GENMASK(15, 0)
+static inline void SET_DCTL_AES_IV_L_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 16));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_AES_IV_L,
+ GENMASK(31, 16));
+}
+
+#define SET_DCTL_MASK_AES_IV_H GENMASK(31, 0)
+static inline void SET_DCTL_AES_IV_H_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 0));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_DCTL_MASK_AES_IV_H,
+ GENMASK(31, 0));
+}
+
+#define SET_DCTL_MASK_SEQ0 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ0_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 0));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ0,
+ GENMASK(11, 0));
+}
+
+#define SET_DCTL_MASK_SEQ1 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ1_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(23, 12));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ1,
+ GENMASK(23, 12));
+}
+
+#define SET_DCTL_MASK_AMSDU_MAX_LEN GENMASK(2, 0)
+static inline void SET_DCTL_AMSDU_MAX_LEN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 24));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_AMSDU_MAX_LEN,
+ GENMASK(26, 24));
+}
+
+#define SET_DCTL_MASK_STA_AMSDU_EN BIT(0)
+static inline void SET_DCTL_STA_AMSDU_EN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_STA_AMSDU_EN,
+ BIT(27));
+}
+
+#define SET_DCTL_MASK_CHKSUM_OFLD_EN BIT(0)
+static inline void SET_DCTL_CHKSUM_OFLD_EN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(28));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_CHKSUM_OFLD_EN,
+ BIT(28));
+}
+
+#define SET_DCTL_MASK_WITH_LLC BIT(0)
+static inline void SET_DCTL_WITH_LLC_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(29));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_WITH_LLC,
+ BIT(29));
+}
+
+#define SET_DCTL_MASK_SEQ2 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ2_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(11, 0));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ2,
+ GENMASK(11, 0));
+}
+
+#define SET_DCTL_MASK_SEQ3 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ3_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(23, 12));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ3,
+ GENMASK(23, 12));
+}
+
+#define SET_DCTL_MASK_TGT_IND GENMASK(3, 0)
+static inline void SET_DCTL_TGT_IND_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 24));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND,
+ GENMASK(27, 24));
+}
+
+#define SET_DCTL_MASK_TGT_IND_EN BIT(0)
+static inline void SET_DCTL_TGT_IND_EN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, BIT(28));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND_EN,
+ BIT(28));
+}
+
+#define SET_DCTL_MASK_HTC_LB GENMASK(2, 0)
+static inline void SET_DCTL_HTC_LB_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 29));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_HTC_LB,
+ GENMASK(31, 29));
+}
+
+#define SET_DCTL_MASK_MHDR_LEN GENMASK(4, 0)
+static inline void SET_DCTL_MHDR_LEN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(4, 0));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_MHDR_LEN,
+ GENMASK(4, 0));
+}
+
+#define SET_DCTL_MASK_VLAN_TAG_VALID BIT(0)
+static inline void SET_DCTL_VLAN_TAG_VALID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(5));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_VALID,
+ BIT(5));
+}
+
+#define SET_DCTL_MASK_VLAN_TAG_SEL GENMASK(1, 0)
+static inline void SET_DCTL_VLAN_TAG_SEL_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 6));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_SEL,
+ GENMASK(7, 6));
+}
+
+#define SET_DCTL_MASK_HTC_ORDER BIT(0)
+static inline void SET_DCTL_HTC_ORDER_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_HTC_ORDER,
+ BIT(8));
+}
+
+#define SET_DCTL_MASK_SEC_KEY_ID GENMASK(1, 0)
+static inline void SET_DCTL_SEC_KEY_ID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(10, 9));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_KEY_ID,
+ GENMASK(10, 9));
+}
+
+#define SET_DCTL_MASK_WAPI BIT(0)
+static inline void SET_DCTL_WAPI_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_WAPI,
+ BIT(15));
+}
+
+#define SET_DCTL_MASK_SEC_ENT_MODE GENMASK(1, 0)
+static inline void SET_DCTL_SEC_ENT_MODE_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(17, 16));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENT_MODE,
+ GENMASK(17, 16));
+}
+
+#define SET_DCTL_MASK_SEC_ENTX_KEYID GENMASK(1, 0)
+static inline void SET_DCTL_SEC_ENT0_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(19, 18));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(19, 18));
+}
+
+static inline void SET_DCTL_SEC_ENT1_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(21, 20));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(21, 20));
+}
+
+static inline void SET_DCTL_SEC_ENT2_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(23, 22));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(23, 22));
+}
+
+static inline void SET_DCTL_SEC_ENT3_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(25, 24));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(25, 24));
+}
+
+static inline void SET_DCTL_SEC_ENT4_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(27, 26));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(27, 26));
+}
+
+static inline void SET_DCTL_SEC_ENT5_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(29, 28));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(29, 28));
+}
+
+static inline void SET_DCTL_SEC_ENT6_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 30));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(31, 30));
+}
+
+#define SET_DCTL_MASK_SEC_ENT_VALID GENMASK(7, 0)
+static inline void SET_DCTL_SEC_ENT_VALID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENT_VALID,
+ GENMASK(7, 0));
+}
+
+#define SET_DCTL_MASK_SEC_ENTX GENMASK(7, 0)
+static inline void SET_DCTL_SEC_ENT0_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(15, 8));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(15, 8));
+}
+
+static inline void SET_DCTL_SEC_ENT1_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 16));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(23, 16));
+}
+
+static inline void SET_DCTL_SEC_ENT2_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(31, 24));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(31, 24));
+}
+
+static inline void SET_DCTL_SEC_ENT3_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(7, 0));
+}
+
+static inline void SET_DCTL_SEC_ENT4_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(15, 8));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(15, 8));
+}
+
+static inline void SET_DCTL_SEC_ENT5_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 16));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(23, 16));
+}
+
+static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 24));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(31, 24));
+}
+
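Each V1 setter above follows the same pairing: the field value is written into one dword of the D-MAC control-table image, and the same bit range is set in the mask dword eight words later, so firmware only applies the fields that were explicitly flagged. A minimal caller sketch, assuming a zeroed table buffer of at least 16 dwords (the highest offset the setters touch); the function name and values are illustrative only, not part of this patch:

static void example_fill_dctl_v1(void *table)
{
	/* value into dword 5 bit 5, mask into dword 13 bit 5 */
	SET_DCTL_VLAN_TAG_VALID_V1(table, 1);
	/* value into dword 5 bits 10:9, mask into dword 13 bits 10:9 */
	SET_DCTL_SEC_KEY_ID_V1(table, 0x2);
	/* value into dword 6 bits 7:0, mask into dword 14 bits 7:0 */
	SET_DCTL_SEC_ENT_VALID_V1(table, 0x01);
}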
static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -1461,6 +1793,11 @@ static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
}
+static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
+}
+
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
BTFC_GET = 0x11,
@@ -2140,6 +2477,12 @@ struct rtw89_fw_h2c_rf_reg_info {
#define FWCMD_TYPE_H2C 0
+#define H2C_CAT_TEST 0x0
+
+/* CLASS 5 - FW STATUS TEST */
+#define H2C_CL_FW_STATUS_TEST 0x5
+#define H2C_FUNC_CPU_EXCEPTION 0x1
+
#define H2C_CAT_MAC 0x1
/* CLASS 0 - FW INFO */
@@ -2159,6 +2502,8 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_CL_MAC_FR_EXCHG 0x5
#define H2C_FUNC_MAC_CCTLINFO_UD 0x2
#define H2C_FUNC_MAC_BCN_UPD 0x5
+#define H2C_FUNC_MAC_DCTLINFO_UD_V1 0x9
+#define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -2193,6 +2538,28 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
+#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
+#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+
+struct rtw89_fw_h2c_rf_get_mccch {
+ __le32 ch_0;
+ __le32 ch_1;
+ __le32 band_0;
+ __le32 band_1;
+ __le32 current_channel;
+ __le32 current_band_type;
+} __packed;
+
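Illustrative only (not part of this patch): one plausible way to populate this notification and hand it to firmware through the raw-with-header helper declared further down in this header, using the new H2C_CL_OUTSRC_RF_FW_NOTIFY / H2C_FUNC_OUTSRC_RF_GET_MCCCH IDs; the channel/band values and the rack/dack flags are placeholders:

static int example_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_h2c_rf_get_mccch mccch = {
		.ch_0 = cpu_to_le32(1),		/* placeholder */
		.ch_1 = cpu_to_le32(36),	/* placeholder */
		.band_0 = cpu_to_le32(0),	/* placeholder */
		.band_1 = cpu_to_le32(1),	/* placeholder */
		.current_channel = cpu_to_le32(1),
		.current_band_type = cpu_to_le32(0),
	};

	return rtw89_fw_h2c_raw_with_hdr(rtwdev, H2C_CL_OUTSRC_RF_FW_NOTIFY,
					 H2C_FUNC_OUTSRC_RF_GET_MCCCH,
					 (u8 *)&mccch, sizeof(mccch),
					 false, true);
}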
+#define RTW89_FW_RSVD_PLE_SIZE 0x800
+
+#define RTW89_WCPU_BASE_ADDR 0xA0000000
+
+#define RTW89_FW_BACKTRACE_INFO_SIZE 8
+#define RTW89_VALID_FW_BACKTRACE_SIZE(_size) \
+ ((_size) % RTW89_FW_BACKTRACE_INFO_SIZE == 0)
+
+#define RTW89_FW_BACKTRACE_MAX_SIZE 512 /* 8 * 64 (entries) */
+#define RTW89_FW_BACKTRACE_KEY 0xBACEBACE
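A small sketch, not from the patch, of how the backtrace constants above could be used to sanity-check a dumped backtrace blob before parsing; the helper name is hypothetical:

static bool example_fw_backtrace_size_ok(u32 size)
{
	/* a usable dump is non-empty, a whole number of 8-byte entries,
	 * and no larger than the 64-entry maximum
	 */
	return size != 0 &&
	       size <= RTW89_FW_BACKTRACE_MAX_SIZE &&
	       RTW89_VALID_FW_BACKTRACE_SIZE(size);
}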
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev);
int rtw89_fw_recognize(struct rtw89_dev *rtwdev);
@@ -2214,6 +2581,9 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
+int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -2243,6 +2613,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
+int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -2255,8 +2626,8 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
-struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len);
-struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len);
+struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
+struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *h2c_info,
struct rtw89_mac_c2h_info *c2h_info);
@@ -2273,5 +2644,6 @@ void rtw89_hw_scan_status_report(struct rtw89_dev *rtwdev, struct sk_buff *skb);
void rtw89_hw_scan_chan_switch(struct rtw89_dev *rtwdev, struct sk_buff *skb);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
void rtw89_store_op_chan(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 5e554bd9f036..05b94842fe66 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -10,6 +10,45 @@
#include "reg.h"
#include "util.h"
+const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = {
+ [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR,
+ [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
+ [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
+ [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
+ [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR,
+ [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR,
+ [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
+ [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
+ [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR,
+ [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR,
+ [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
+ [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
+ [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR,
+};
+
+static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset,
+ u32 val, enum rtw89_mac_mem_sel sel)
+{
+ u32 addr = rtw89_mac_mem_base_addrs[sel] + offset;
+
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, val);
+}
+
+static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset,
+ enum rtw89_mac_mem_sel sel)
+{
+ u32 addr = rtw89_mac_mem_base_addrs[sel] + offset;
+
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
+ return rtw89_read32(rtwdev, R_AX_INDIR_ACCESS_ENTRY);
+}
+
int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_mac_hwmod_sel sel)
{
@@ -237,7 +276,9 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
u32 dmac_err, cmac_err;
if (err != MAC_AX_ERR_L1_ERR_DMAC &&
- err != MAC_AX_ERR_L0_PROMOTE_TO_L1)
+ err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC0 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC1)
return;
rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
@@ -438,7 +479,7 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
{
- u32 err;
+ u32 err, err_scnr;
int ret;
ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000,
@@ -451,6 +492,12 @@ u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
err = rtw89_read32(rtwdev, R_AX_HALT_C2H);
rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+ err_scnr = RTW89_ERROR_SCENARIO(err);
+ if (err_scnr == RTW89_WCPU_CPU_EXCEPTION)
+ err = MAC_AX_ERR_CPU_EXCEPTION;
+ else if (err_scnr == RTW89_WCPU_ASSERTION)
+ err = MAC_AX_ERR_ASSERTION;
+
rtw89_fw_st_dbg_dump(rtwdev);
rtw89_mac_dump_err_status(rtwdev, err);
@@ -482,11 +529,6 @@ int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
}
EXPORT_SYMBOL(rtw89_mac_set_err_status);
-const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie = {
- 2, 40, 0, 0, 1, 0, 0, 0
-};
-EXPORT_SYMBOL(rtw89_hfc_preccfg_pcie);
-
static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
@@ -985,7 +1027,7 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
return 0;
rpwm_req_num = rtwdev->mac.rpwm_seq_num;
- cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM,
+ cpwm_rsp_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr,
PS_CPWM_RSP_SEQ_NUM);
if (rpwm_req_num != cpwm_rsp_seq)
@@ -994,11 +1036,11 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) &
CPWM_SEQ_NUM_MAX;
- cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM);
+ cpwm_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_SEQ_NUM);
if (cpwm_seq != rtwdev->mac.cpwm_seq_num)
return -EPERM;
- cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE);
+ cpwm_status = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_STATE);
if (cpwm_status != req_pwr_state)
return -EPERM;
@@ -1091,7 +1133,8 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN |
- B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN;
+ B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN |
+ B_AX_CMAC_CRPRT;
ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN |
B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN |
B_AX_RMAC_CKEN;
@@ -1188,119 +1231,48 @@ static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
return ret;
}
-/* PCIE 64 */
-const struct rtw89_dle_size rtw89_wde_size0 = {
- RTW89_WDE_PG_64, 4095, 1,
-};
-EXPORT_SYMBOL(rtw89_wde_size0);
-
-/* DLFW */
-const struct rtw89_dle_size rtw89_wde_size4 = {
- RTW89_WDE_PG_64, 0, 4096,
-};
-EXPORT_SYMBOL(rtw89_wde_size4);
-
-/* 8852C DLFW */
-const struct rtw89_dle_size rtw89_wde_size18 = {
- RTW89_WDE_PG_64, 0, 2048,
-};
-EXPORT_SYMBOL(rtw89_wde_size18);
-
-/* 8852C PCIE SCC */
-const struct rtw89_dle_size rtw89_wde_size19 = {
- RTW89_WDE_PG_64, 3328, 0,
-};
-EXPORT_SYMBOL(rtw89_wde_size19);
-
-/* PCIE */
-const struct rtw89_dle_size rtw89_ple_size0 = {
- RTW89_PLE_PG_128, 1520, 16,
+const struct rtw89_mac_size_set rtw89_mac_size = {
+ .hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0},
+ /* PCIE 64 */
+ .wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
+ /* DLFW */
+ .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
+ /* 8852C DLFW */
+ .wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
+ /* 8852C PCIE SCC */
+ .wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
+ /* PCIE */
+ .ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
+ /* DLFW */
+ .ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
+ /* 8852C DLFW */
+ .ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
+ /* 8852C PCIE SCC */
+ .ple_size19 = {RTW89_PLE_PG_128, 1904, 16,},
+ /* PCIE 64 */
+ .wde_qt0 = {3792, 196, 0, 107,},
+ /* DLFW */
+ .wde_qt4 = {0, 0, 0, 0,},
+ /* 8852C DLFW */
+ .wde_qt17 = {0, 0, 0, 0,},
+ /* 8852C PCIE SCC */
+ .wde_qt18 = {3228, 60, 0, 40,},
+ /* PCIE SCC */
+ .ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
+ /* PCIE SCC */
+ .ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
+ /* DLFW */
+ .ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,},
+ /* DLFW 52C */
+ .ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
+ /* DLFW 52C */
+ .ple_qt45 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
+ /* 8852C PCIE SCC */
+ .ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,},
+ /* 8852C PCIE SCC */
+ .ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
};
-EXPORT_SYMBOL(rtw89_ple_size0);
-
-/* DLFW */
-const struct rtw89_dle_size rtw89_ple_size4 = {
- RTW89_PLE_PG_128, 64, 1472,
-};
-EXPORT_SYMBOL(rtw89_ple_size4);
-
-/* 8852C DLFW */
-const struct rtw89_dle_size rtw89_ple_size18 = {
- RTW89_PLE_PG_128, 2544, 16,
-};
-EXPORT_SYMBOL(rtw89_ple_size18);
-
-/* 8852C PCIE SCC */
-const struct rtw89_dle_size rtw89_ple_size19 = {
- RTW89_PLE_PG_128, 1904, 16,
-};
-EXPORT_SYMBOL(rtw89_ple_size19);
-
-/* PCIE 64 */
-const struct rtw89_wde_quota rtw89_wde_qt0 = {
- 3792, 196, 0, 107,
-};
-EXPORT_SYMBOL(rtw89_wde_qt0);
-
-/* DLFW */
-const struct rtw89_wde_quota rtw89_wde_qt4 = {
- 0, 0, 0, 0,
-};
-EXPORT_SYMBOL(rtw89_wde_qt4);
-
-/* 8852C DLFW */
-const struct rtw89_wde_quota rtw89_wde_qt17 = {
- 0, 0, 0, 0,
-};
-EXPORT_SYMBOL(rtw89_wde_qt17);
-
-/* 8852C PCIE SCC */
-const struct rtw89_wde_quota rtw89_wde_qt18 = {
- 3228, 60, 0, 40,
-};
-EXPORT_SYMBOL(rtw89_wde_qt18);
-
-/* PCIE SCC */
-const struct rtw89_ple_quota rtw89_ple_qt4 = {
- 264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
-};
-EXPORT_SYMBOL(rtw89_ple_qt4);
-
-/* PCIE SCC */
-const struct rtw89_ple_quota rtw89_ple_qt5 = {
- 264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,
-};
-EXPORT_SYMBOL(rtw89_ple_qt5);
-
-/* DLFW */
-const struct rtw89_ple_quota rtw89_ple_qt13 = {
- 0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0
-};
-EXPORT_SYMBOL(rtw89_ple_qt13);
-
-/* DLFW 52C */
-const struct rtw89_ple_quota rtw89_ple_qt44 = {
- 0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-EXPORT_SYMBOL(rtw89_ple_qt44);
-
-/* DLFW 52C */
-const struct rtw89_ple_quota rtw89_ple_qt45 = {
- 0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-EXPORT_SYMBOL(rtw89_ple_qt45);
-
-/* 8852C PCIE SCC */
-const struct rtw89_ple_quota rtw89_ple_qt46 = {
- 525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,
-};
-EXPORT_SYMBOL(rtw89_ple_qt46);
-
-/* 8852C PCIE SCC */
-const struct rtw89_ple_quota rtw89_ple_qt47 = {
- 525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,
-};
-EXPORT_SYMBOL(rtw89_ple_qt47);
+EXPORT_SYMBOL(rtw89_mac_size);
static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
enum rtw89_qta_mode mode)
@@ -1608,6 +1580,17 @@ static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
return false;
}
+static void _patch_ss2f_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
+ return;
+
+ rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK,
+ SS2F_PATH_WLCPU);
+}
+
static int sta_sch_init(struct rtw89_dev *rtwdev)
{
u32 p_val;
@@ -1630,6 +1613,9 @@ static int sta_sch_init(struct rtw89_dev *rtwdev)
}
rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG);
+ rtw89_write32_clr(rtwdev, R_AX_SS_CTRL, B_AX_SS_NONEMPTY_SS2FINFO_EN);
+
+ _patch_ss2f_path(rtwdev);
return 0;
}
@@ -1653,6 +1639,7 @@ static int mpdu_proc_init(struct rtw89_dev *rtwdev)
static int sec_eng_init(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val = 0;
int ret;
@@ -1666,7 +1653,8 @@ static int sec_eng_init(struct rtw89_dev *rtwdev)
/* init TX encryption */
val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC);
val |= (B_AX_MC_DEC | B_AX_BC_DEC);
- val &= ~B_AX_TX_PARTIAL_MODE;
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
+ val &= ~B_AX_TX_PARTIAL_MODE;
rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val);
/* init MIC ICV append */
@@ -1676,6 +1664,10 @@ static int sec_eng_init(struct rtw89_dev *rtwdev)
/* option init */
rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val);
+ if (chip->chip_id == RTL8852C)
+ rtw89_write32_mask(rtwdev, R_AX_SEC_DEBUG1,
+ B_AX_TX_TIMEOUT_SEL_MASK, AX_TX_TO_VAL);
+
return 0;
}
@@ -1758,6 +1750,17 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
if (ret)
return ret;
+ reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1);
+
+ if (rtwdev->chip->chip_id == RTL8852B) {
+ reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV);
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_CCA_CFG_0, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN);
+
reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US);
@@ -1899,6 +1902,16 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
+static int nav_ctrl_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN |
+ B_AX_WMAC_TF_UP_NAV_EN |
+ B_AX_WMAC_NAV_UPPER_EN);
+ rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_12MS);
+
+ return 0;
+}
+
static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
@@ -1925,6 +1938,13 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx);
rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN);
+ reg = rtw89_mac_reg_by_idx(R_AX_TCR0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TXD_FIFO_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE);
+ rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE);
+
return 0;
}
@@ -1963,6 +1983,21 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
+static void rst_bacam(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ rtw89_write32_mask(rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK,
+ S_AX_BACAM_RST_ALL);
+
+ ret = read_poll_timeout_atomic(rtw89_read32_mask, val32, val32 == 0,
+ 1, 1000, false,
+ rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to reset BA CAM\n");
+}
+
static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
#define TRXCFG_RMAC_CCA_TO 32
@@ -1977,6 +2012,9 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
if (ret)
return ret;
+ if (mac_idx == RTW89_MAC_0)
+ rst_bacam(rtwdev);
+
reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx);
rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL);
@@ -2064,6 +2102,8 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
val = rtw89_read32(rtwdev, reg);
val = u32_replace_bits(val, S_AX_CTS2S_TH_1K,
B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK);
+ val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B,
+ B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
val |= B_AX_HW_CTS2SELF_EN;
rtw89_write32(rtwdev, reg, val);
@@ -2074,11 +2114,19 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32(rtwdev, reg, val);
}
- reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx);
- val = rtw89_read32(rtwdev, reg);
- val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
- val |= B_AX_HW_CTS2SELF_EN;
- rtw89_write32(rtwdev, reg, val);
+ if (mac_idx == RTW89_MAC_0) {
+ rtw89_write8_set(rtwdev, R_AX_PTCL_COMMON_SETTING_0,
+ B_AX_CMAC_TX_MODE_0 | B_AX_CMAC_TX_MODE_1);
+ rtw89_write8_clr(rtwdev, R_AX_PTCL_COMMON_SETTING_0,
+ B_AX_PTCL_TRIGGER_SS_EN_0 |
+ B_AX_PTCL_TRIGGER_SS_EN_1 |
+ B_AX_PTCL_TRIGGER_SS_EN_UL);
+ rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL,
+ B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
+ } else if (mac_idx == RTW89_MAC_1) {
+ rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL_C1,
+ B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
+ }
return 0;
}
@@ -2114,6 +2162,13 @@ static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = nav_ctrl_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
ret = spatial_reuse_init(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n",
@@ -2589,10 +2644,206 @@ static int band1_enable(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_wdrls_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, B_AX_WDRLS_IMR_EN_CLR);
+ rtw89_write32_set(rtwdev, R_AX_WDRLS_ERR_IMR, imr->wdrls_imr_set);
+}
+
+static void rtw89_wsec_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_set(rtwdev, imr->wsec_imr_reg, imr->wsec_imr_set);
+}
+
+static void rtw89_mpdu_trx_imr_enable(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR,
+ B_AX_TX_GET_ERRPKTID_INT_EN |
+ B_AX_TX_NXT_ERRPKTID_INT_EN |
+ B_AX_TX_MPDU_SIZE_ZERO_INT_EN |
+ B_AX_TX_OFFSET_ERR_INT_EN |
+ B_AX_TX_HDR3_SIZE_ERR_INT_EN);
+ if (chip_id == RTL8852C)
+ rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR,
+ B_AX_TX_ETH_TYPE_ERR_EN |
+ B_AX_TX_LLC_PRE_ERR_EN |
+ B_AX_TX_NW_TYPE_ERR_EN |
+ B_AX_TX_KSRCH_ERR_EN);
+ rtw89_write32_set(rtwdev, R_AX_MPDU_TX_ERR_IMR,
+ imr->mpdu_tx_imr_set);
+
+ rtw89_write32_clr(rtwdev, R_AX_MPDU_RX_ERR_IMR,
+ B_AX_GETPKTID_ERR_INT_EN |
+ B_AX_MHDRLEN_ERR_INT_EN |
+ B_AX_RPT_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, R_AX_MPDU_RX_ERR_IMR,
+ imr->mpdu_rx_imr_set);
+}
+
+static void rtw89_sta_sch_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR,
+ B_AX_SEARCH_HANG_TIMEOUT_INT_EN |
+ B_AX_RPT_HANG_TIMEOUT_INT_EN |
+ B_AX_PLE_B_PKTID_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR,
+ imr->sta_sch_imr_set);
+}
+
+static void rtw89_txpktctl_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b0_reg,
+ imr->txpktctl_imr_b0_clr);
+ rtw89_write32_set(rtwdev, imr->txpktctl_imr_b0_reg,
+ imr->txpktctl_imr_b0_set);
+ rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b1_reg,
+ imr->txpktctl_imr_b1_clr);
+ rtw89_write32_set(rtwdev, imr->txpktctl_imr_b1_reg,
+ imr->txpktctl_imr_b1_set);
+}
+
+static void rtw89_wde_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_set);
+}
+
+static void rtw89_ple_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_set);
+}
+
+static void rtw89_pktin_imr_enable(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_AX_PKTIN_ERR_IMR,
+ B_AX_PKTIN_GETPKTID_ERR_INT_EN);
+}
+
+static void rtw89_dispatcher_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
+ imr->host_disp_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
+ imr->host_disp_imr_set);
+ rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
+ imr->cpu_disp_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
+ imr->cpu_disp_imr_set);
+ rtw89_write32_clr(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR,
+ imr->other_disp_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR,
+ imr->other_disp_imr_set);
+}
+
+static void rtw89_cpuio_imr_enable(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_clr(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_CLR);
+ rtw89_write32_set(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_SET);
+}
+
+static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_set(rtwdev, R_AX_BBRPT_COM_ERR_IMR,
+ B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN);
+ rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
+ B_AX_BBRPT_CHINFO_IMR_CLR);
+ rtw89_write32_set(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
+ imr->bbrpt_err_imr_set);
+ rtw89_write32_set(rtwdev, imr->bbrpt_dfs_err_imr_reg,
+ B_AX_BBRPT_DFS_TO_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, R_AX_LA_ERRFLAG, B_AX_LA_IMR_DATA_LOSS_ERR);
+}
+
+static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN |
+ B_AX_FSM_TIMEOUT_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN);
+}
+
+static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set);
+}
+
+static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->cdma_imr_0_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr);
+ rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set);
+
+ if (chip_id == RTL8852C) {
+ reg = rtw89_mac_reg_by_idx(imr->cdma_imr_1_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr);
+ rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set);
+ }
+}
+
+static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->phy_intf_imr_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set);
+}
+
+static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->rmac_imr_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set);
+}
+
+static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->tmac_imr_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set);
+}
+
static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_mac_hwmod_sel sel)
{
- u32 reg, val;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel);
@@ -2603,60 +2854,24 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
}
if (sel == RTW89_DMAC_SEL) {
- rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR,
- B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN |
- B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN |
- B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
- B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN |
- B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
- B_AX_HDT_PKT_FAIL_DBG_INT_EN |
- B_AX_HDT_OFFSET_UNMATCH_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
- B_AX_CPU_SHIFT_EN_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR,
- B_AX_PLE_GETNPG_STRPG_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR,
- B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN);
- rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR,
- B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN);
+ rtw89_wdrls_imr_enable(rtwdev);
+ rtw89_wsec_imr_enable(rtwdev);
+ rtw89_mpdu_trx_imr_enable(rtwdev);
+ rtw89_sta_sch_imr_enable(rtwdev);
+ rtw89_txpktctl_imr_enable(rtwdev);
+ rtw89_wde_imr_enable(rtwdev);
+ rtw89_ple_imr_enable(rtwdev);
+ rtw89_pktin_imr_enable(rtwdev);
+ rtw89_dispatcher_imr_enable(rtwdev);
+ rtw89_cpuio_imr_enable(rtwdev);
+ rtw89_bbrpt_imr_enable(rtwdev);
} else if (sel == RTW89_CMAC_SEL) {
- reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx);
- rtw89_write32_clr(rtwdev, reg,
- B_AX_SORT_NON_IDLE_ERR_INT_EN);
-
- reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx);
- rtw89_write32_clr(rtwdev, reg,
- B_AX_NO_RESERVE_PAGE_ERR_IMR |
- B_AX_RXDATA_FSM_HANG_ERROR_IMR);
-
- reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx);
- val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN |
- B_AX_TX_RECORD_PKTID_ERR_INT_EN |
- B_AX_FSM_TIMEOUT_ERR_INT_EN;
- rtw89_write32(rtwdev, reg, val);
-
- reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx);
- rtw89_write32_set(rtwdev, reg,
- B_AX_PHY_TXON_TIMEOUT_INT_EN |
- B_AX_CCK_CCA_TIMEOUT_INT_EN |
- B_AX_OFDM_CCA_TIMEOUT_INT_EN |
- B_AX_DATA_ON_TIMEOUT_INT_EN |
- B_AX_STS_ON_TIMEOUT_INT_EN |
- B_AX_CSI_ON_TIMEOUT_INT_EN);
-
- reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx);
- val = rtw89_read32(rtwdev, reg);
- val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN |
- B_AX_RMAC_RX_TIMEOUT_INT_EN |
- B_AX_RMAC_CSI_TIMEOUT_INT_EN);
- val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN |
- B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN |
- B_AX_RMAC_CCA_TIMEOUT_INT_EN |
- B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN);
- rtw89_write32(rtwdev, reg, val);
+ rtw89_scheduler_imr_enable(rtwdev, mac_idx);
+ rtw89_ptcl_imr_enable(rtwdev, mac_idx);
+ rtw89_cdma_imr_enable(rtwdev, mac_idx);
+ rtw89_phy_intf_imr_enable(rtwdev, mac_idx);
+ rtw89_rmac_imr_enable(rtwdev, mac_idx);
+ rtw89_tmac_imr_enable(rtwdev, mac_idx);
} else {
return -EINVAL;
}
@@ -2664,6 +2879,19 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+static void rtw89_mac_err_imr_ctrl(struct rtw89_dev *rtwdev, bool en)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+ rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR,
+ en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS);
+ rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR,
+ en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS);
+ if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta)
+ rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1,
+ en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
+}
+
static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable)
{
int ret = 0;
@@ -2745,6 +2973,8 @@ static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
return ret;
}
+ rtw89_mac_err_imr_ctrl(rtwdev, true);
+
ret = set_host_rpr(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
@@ -2754,6 +2984,19 @@ static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+
+ rtw89_mac_mem_write(rtwdev, R_AX_WDT_CTRL,
+ WDT_CTRL_ALL_DIS, RTW89_MAC_MEM_CPU_LOCAL);
+
+ val32 = rtw89_mac_mem_read(rtwdev, R_AX_WDT_STATUS, RTW89_MAC_MEM_CPU_LOCAL);
+ val32 |= B_AX_FS_WDT_INT;
+ val32 &= ~B_AX_FS_WDT_INT_MSK;
+ rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL);
+}
+
static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
{
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
@@ -2762,6 +3005,9 @@ static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
+
+ rtw89_disable_fw_watchdog(rtwdev);
+
rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
}
@@ -2804,18 +3050,41 @@ static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
return 0;
}
-static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
+static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val;
int ret;
- val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
- B_AX_PKT_BUF_EN;
+ if (chip_id == RTL8852C)
+ val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
+ B_AX_PKT_BUF_EN | B_AX_H_AXIDMA_EN;
+ else
+ val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
+ B_AX_PKT_BUF_EN;
rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val);
val = B_AX_DISPATCHER_CLK_EN;
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val);
+ if (chip_id != RTL8852C)
+ goto dle;
+
+ val = rtw89_read32(rtwdev, R_AX_HAXI_INIT_CFG1);
+ val &= ~(B_AX_DMA_MODE_MASK | B_AX_STOP_AXI_MST);
+ val |= FIELD_PREP(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_1B) |
+ B_AX_TXHCI_EN_V1 | B_AX_RXHCI_EN_V1;
+ rtw89_write32(rtwdev, R_AX_HAXI_INIT_CFG1, val);
+
+ rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP1,
+ B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | B_AX_STOP_ACH3 |
+ B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | B_AX_STOP_ACH6 |
+ B_AX_STOP_ACH7 | B_AX_STOP_CH8 | B_AX_STOP_CH9 |
+ B_AX_STOP_CH12 | B_AX_STOP_ACH2);
+ rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP2, B_AX_STOP_CH10 | B_AX_STOP_CH11);
+ rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_AXIDMA_EN);
+
+dle:
ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret);
@@ -2839,7 +3108,7 @@ static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
}
-void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2847,7 +3116,10 @@ void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
+
+ return 0;
}
+EXPORT_SYMBOL(rtw89_mac_enable_bb_rf);
void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
@@ -2858,6 +3130,7 @@ void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
}
+EXPORT_SYMBOL(rtw89_mac_disable_bb_rf);
int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
{
@@ -2873,16 +3146,16 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
rtw89_mac_hci_func_en(rtwdev);
+ ret = rtw89_mac_dmac_pre_init(rtwdev);
+ if (ret)
+ return ret;
+
if (rtwdev->hci.ops->mac_pre_init) {
ret = rtwdev->hci.ops->mac_pre_init(rtwdev);
if (ret)
return ret;
}
- ret = rtw89_mac_fw_dl_pre_init(rtwdev);
- if (ret)
- return ret;
-
rtw89_mac_disable_cpu(rtwdev);
ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
if (ret)
@@ -2903,7 +3176,9 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
if (ret)
goto fail;
- rtw89_mac_enable_bb_rf(rtwdev);
+ ret = rtw89_chip_enable_bb_rf(rtwdev);
+ if (ret)
+ goto fail;
ret = rtw89_mac_sys_init(rtwdev);
if (ret)
@@ -3451,12 +3726,18 @@ rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
+static void
+rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL,
[RTW89_MAC_C2H_FUNC_READ_RSP] = NULL,
- [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL,
+ [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
[RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
@@ -3692,6 +3973,34 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex
}
EXPORT_SYMBOL(rtw89_mac_coex_init);
+int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex *coex)
+{
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG,
+ B_AX_BTC_EN | B_AX_BTG_LNA1_GAIN_SEL);
+ rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN);
+ rtw89_write16_set(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_EN);
+ rtw89_write16_clr(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_BRK_TXOP_EN);
+
+ switch (coex->pta_mode) {
+ case RTW89_MAC_AX_COEX_RTK_MODE:
+ rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK,
+ MAC_AX_RTK_MODE);
+ rtw89_write32_mask(rtwdev, R_AX_RTK_MODE_CFG_V1,
+ B_AX_SAMPLE_CLK_MASK, MAC_AX_RTK_RATE);
+ break;
+ case RTW89_MAC_AX_COEX_CSR_MODE:
+ rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK,
+ MAC_AX_CSR_MODE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_coex_init_v1);
+
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
{
@@ -3942,7 +4251,7 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev,
u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
u8 port_sel = rtwvif->port;
u8 sound_dim = 3, t;
- u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info;
+ u8 *phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
u32 reg;
u16 val;
int ret;
@@ -3959,12 +4268,12 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev,
phy_cap[5]);
sound_dim = min(sound_dim, t);
}
- if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
- ldpc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
+ if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
- sta->vht_cap.cap);
+ sta->deflink.vht_cap.cap);
sound_dim = min(sound_dim, t);
}
nc = min(nc, sound_dim);
@@ -4005,17 +4314,17 @@ static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev,
if (ret)
return ret;
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) |
BIT(RTW89_MAC_BF_RRSC_HE_MSC3) |
BIT(RTW89_MAC_BF_RRSC_HE_MSC5));
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) |
BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) |
BIT(RTW89_MAC_BF_RRSC_VHT_MSC5));
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) |
BIT(RTW89_MAC_BF_RRSC_HT_MSC3) |
BIT(RTW89_MAC_BF_RRSC_HT_MSC5));
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index b797667c78c6..9eb4afe348b3 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -245,6 +245,7 @@ enum rtw89_mac_dbg_port_sel {
#define TXD_FIFO_1_BASE_ADDR 0x188A1080
#define TXDATA_FIFO_0_BASE_ADDR 0x18856000
#define TXDATA_FIFO_1_BASE_ADDR 0x188A1000
+#define CPU_LOCAL_BASE_ADDR 0x18003000
#define CCTL_INFO_SIZE 32
@@ -266,13 +267,14 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_TXD_FIFO_1,
RTW89_MAC_MEM_TXDATA_FIFO_0,
RTW89_MAC_MEM_TXDATA_FIFO_1,
+ RTW89_MAC_MEM_CPU_LOCAL,
/* keep last */
- RTW89_MAC_MEM_LAST,
- RTW89_MAC_MEM_MAX = RTW89_MAC_MEM_LAST,
- RTW89_MAC_MEM_INVALID = RTW89_MAC_MEM_LAST,
+ RTW89_MAC_MEM_NUM,
};
+extern const u32 rtw89_mac_mem_base_addrs[];
+
enum rtw89_rpwm_req_pwr_state {
RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE = 0,
RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFON = 1,
@@ -519,6 +521,13 @@ struct rtw89_mac_dle_dfi_qempty {
u32 qempty;
};
+enum rtw89_mac_error_scenario {
+ RTW89_WCPU_CPU_EXCEPTION = 2,
+ RTW89_WCPU_ASSERTION = 3,
+};
+
+#define RTW89_ERROR_SCENARIO(__err) ((__err) >> 28)
+
/* Define DBG and recovery enum */
enum mac_ax_err_info {
/* Get error info */
@@ -657,6 +666,7 @@ enum mac_ax_err_info {
MAC_AX_ERR_L2_ERR_APB_BBRF_TO_OTHERS = 0x2370,
MAC_AX_ERR_L2_RESET_DONE = 0x2400,
MAC_AX_ERR_CPU_EXCEPTION = 0x3000,
+ MAC_AX_ERR_ASSERTION = 0x4000,
MAC_AX_GET_ERR_MAX,
MAC_AX_DUMP_SHAREBUFF_INDICATOR = 0x80000000,
@@ -672,26 +682,30 @@ enum mac_ax_err_info {
MAC_AX_SET_ERR_MAX,
};
-extern const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie;
-extern const struct rtw89_dle_size rtw89_wde_size0;
-extern const struct rtw89_dle_size rtw89_wde_size4;
-extern const struct rtw89_dle_size rtw89_wde_size18;
-extern const struct rtw89_dle_size rtw89_wde_size19;
-extern const struct rtw89_dle_size rtw89_ple_size0;
-extern const struct rtw89_dle_size rtw89_ple_size4;
-extern const struct rtw89_dle_size rtw89_ple_size18;
-extern const struct rtw89_dle_size rtw89_ple_size19;
-extern const struct rtw89_wde_quota rtw89_wde_qt0;
-extern const struct rtw89_wde_quota rtw89_wde_qt4;
-extern const struct rtw89_wde_quota rtw89_wde_qt17;
-extern const struct rtw89_wde_quota rtw89_wde_qt18;
-extern const struct rtw89_ple_quota rtw89_ple_qt4;
-extern const struct rtw89_ple_quota rtw89_ple_qt5;
-extern const struct rtw89_ple_quota rtw89_ple_qt13;
-extern const struct rtw89_ple_quota rtw89_ple_qt44;
-extern const struct rtw89_ple_quota rtw89_ple_qt45;
-extern const struct rtw89_ple_quota rtw89_ple_qt46;
-extern const struct rtw89_ple_quota rtw89_ple_qt47;
+struct rtw89_mac_size_set {
+ const struct rtw89_hfc_prec_cfg hfc_preccfg_pcie;
+ const struct rtw89_dle_size wde_size0;
+ const struct rtw89_dle_size wde_size4;
+ const struct rtw89_dle_size wde_size18;
+ const struct rtw89_dle_size wde_size19;
+ const struct rtw89_dle_size ple_size0;
+ const struct rtw89_dle_size ple_size4;
+ const struct rtw89_dle_size ple_size18;
+ const struct rtw89_dle_size ple_size19;
+ const struct rtw89_wde_quota wde_qt0;
+ const struct rtw89_wde_quota wde_qt4;
+ const struct rtw89_wde_quota wde_qt17;
+ const struct rtw89_wde_quota wde_qt18;
+ const struct rtw89_ple_quota ple_qt4;
+ const struct rtw89_ple_quota ple_qt5;
+ const struct rtw89_ple_quota ple_qt13;
+ const struct rtw89_ple_quota ple_qt44;
+ const struct rtw89_ple_quota ple_qt45;
+ const struct rtw89_ple_quota ple_qt46;
+ const struct rtw89_ple_quota ple_qt47;
+};
+
+extern const struct rtw89_mac_size_set rtw89_mac_size;
static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band)
{
@@ -783,8 +797,23 @@ int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
-void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
+int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
+
+static inline int rtw89_chip_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->enable_bb_rf(rtwdev);
+}
+
+static inline void rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->disable_bb_rf(rtwdev);
+}
+
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
@@ -800,6 +829,8 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable)
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
+int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex *coex);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
@@ -889,6 +920,8 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta, u8 *tx_retry);
enum rtw89_mac_xtal_si_offset {
+ XTAL0 = 0x0,
+ XTAL3 = 0x3,
XTAL_SI_XTAL_SC_XI = 0x04,
#define XTAL_SC_XI_MASK GENMASK(7, 0)
XTAL_SI_XTAL_SC_XO = 0x05,
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index fca9f82bb462..8da3e117ad38 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -725,7 +725,7 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rtw89_dev *rtwdev = hw->priv;
int ret = 0;
- if (!rtwdev->fw.scan_offload)
+ if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return 1;
if (rtwdev->scanning)
@@ -748,7 +748,7 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- if (!rtwdev->fw.scan_offload)
+ if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return;
if (!rtwdev->scanning)
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index e79bfc335b44..2bdce7024f25 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -382,6 +382,10 @@ static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx
}
list_del_init(&txwd->list);
+
+ /* all skbs of this txwd have already been freed by RPP */
+ if (skb_queue_len(&txwd->queue) == 0)
+ rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
}
@@ -412,16 +416,13 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
u8 txch = tx_ring->txch;
if (!list_empty(&txwd->list)) {
- rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
- txch, seq);
- return;
- }
-
- /* currently, support for only one frame */
- if (skb_queue_len(&txwd->queue) != 1) {
- rtw89_warn(rtwdev, "empty pending queue %d page %d\n",
- txch, seq);
- return;
+ rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
+ /* In low power mode, an RPP can be received before the TX BD has been
+ * updated. In normal mode, this should not happen, so warn about it.
+ */
+ if (!rtwpci->low_power && !list_empty(&txwd->list))
+ rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
+ txch, seq);
}
skb_queue_walk_safe(&txwd->queue, skb, tmp) {
@@ -434,7 +435,8 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
}
- rtw89_pci_enqueue_txwd(tx_ring, txwd);
+ if (list_empty(&txwd->list))
+ rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
@@ -458,7 +460,6 @@ static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
}
tx_ring = &rtwpci->tx_rings[txch];
- rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
wd_ring = &tx_ring->wd_ring;
txwd = &wd_ring->pages[seq];
@@ -612,9 +613,9 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
- struct rtw89_pci *rtwpci,
- struct rtw89_pci_isrs *isrs)
+void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
{
isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
@@ -624,6 +625,28 @@ static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
+EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
+
+void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
+{
+ isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
+ isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
+ rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
+ isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
+ rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
+ isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
+ rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
+
+ if (isrs->halt_c2h_isrs)
+ rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
+ if (isrs->isrs[0])
+ rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
+ if (isrs->isrs[1])
+ rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
+}
+EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
@@ -631,21 +654,75 @@ static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}
-static void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev,
- struct rtw89_pci *rtwpci)
+void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
+EXPORT_SYMBOL(rtw89_pci_enable_intr);
-static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev,
- struct rtw89_pci *rtwpci)
+void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, 0);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
+EXPORT_SYMBOL(rtw89_pci_disable_intr);
+
+void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
+ rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
+ rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
+ rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
+}
+EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
+
+void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
+ rtw89_write32(rtwdev, R_AX_HIMR0, 0);
+ rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, 0);
+ rtw89_write32(rtwdev, R_AX_HIMR1, 0);
+}
+EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
+
+static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
+static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
+static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ int budget = NAPI_POLL_WEIGHT;
+
+ /* Prevent RXQ from getting stuck due to an exhausted budget. */
+ rtwdev->napi_budget_countdown = budget;
+
+ rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
+ rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
+}
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
@@ -655,7 +732,7 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
- rtw89_pci_recognize_intrs(rtwdev, rtwpci, &isrs);
+ rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
@@ -664,6 +741,14 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
+ if (unlikely(rtwpci->under_recovery))
+ goto enable_intr;
+
+ if (unlikely(rtwpci->low_power)) {
+ rtw89_pci_low_power_interrupt_handler(rtwdev);
+ goto enable_intr;
+ }
+
if (likely(rtwpci->running)) {
local_bh_disable();
napi_schedule(&rtwdev->napi);
@@ -671,6 +756,12 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
}
return IRQ_HANDLED;
+
+enable_intr:
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ return IRQ_HANDLED;
}
static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
@@ -690,7 +781,7 @@ static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
goto exit;
}
- rtw89_pci_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
@@ -827,6 +918,21 @@ u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
return cnt;
}
+static
+u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
+ u8 txch)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ u32 cnt;
+
+ spin_lock_bh(&rtwpci->trx_lock);
+ cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ spin_unlock_bh(&rtwpci->trx_lock);
+
+ return cnt;
+}
+
static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
@@ -848,6 +954,10 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
if (!cnt)
goto out_unlock;
rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
+
+ bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ if (bd_cnt == 0)
+ rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
}
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
@@ -865,6 +975,9 @@ out_unlock:
static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
+ if (rtwdev->hci.paused)
+ return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
+
if (txch == RTW89_TXCH_CH12)
return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
@@ -873,12 +986,17 @@ static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 host_idx, addr;
+ spin_lock_bh(&rtwpci->trx_lock);
+
addr = bd_ring->addr.idx;
host_idx = bd_ring->wp;
rtw89_write16(rtwdev, addr, host_idx);
+
+ spin_unlock_bh(&rtwpci->trx_lock);
}
static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
@@ -899,9 +1017,27 @@ static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
- spin_lock_bh(&rtwpci->trx_lock);
+ if (rtwdev->hci.paused) {
+ set_bit(txch, rtwpci->kick_map);
+ return;
+ }
+
__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
- spin_unlock_bh(&rtwpci->trx_lock);
+}
+
+static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring;
+ int txch;
+
+ for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
+ if (!test_and_clear_bit(txch, rtwpci->kick_map))
+ continue;
+
+ tx_ring = &rtwpci->tx_rings[txch];
+ __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
+ }
}
static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
@@ -951,17 +1087,69 @@ static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}
+u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr)
+{
+ struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
+
+ txaddr_info->length = cpu_to_le16(total_len);
+ txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
+ RTW89_PCI_ADDR_NUM(1));
+ txaddr_info->dma = cpu_to_le32(dma);
+
+ *add_info_nr = 1;
+
+ return sizeof(*txaddr_info);
+}
+EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
+
+u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr)
+{
+ struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
+ u32 remain = total_len;
+ u32 len;
+ u16 length_option;
+ int n;
+
+ for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
+ len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
+ TXADDR_INFO_LENTHG_V1_MAX : remain;
+ remain -= len;
+
+ length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
+ FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
+ FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
+ txaddr_info->length_opt = cpu_to_le16(length_option);
+ txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
+ txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
+
+ dma += len;
+ txaddr_info++;
+ }
+
+ WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
+ remain, total_len);
+
+ *add_info_nr = n;
+
+ return n * sizeof(*txaddr_info);
+}
+EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
+
static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct rtw89_pci_tx_wd *txwd,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
- struct rtw89_txwd_body *txwd_body;
struct rtw89_txwd_info *txwd_info;
struct rtw89_pci_tx_wp_info *txwp_info;
- struct rtw89_pci_tx_addr_info_32 *txaddr_info;
+ void *txaddr_info_addr;
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
@@ -972,8 +1160,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
dma_addr_t dma;
int ret;
- rtw89_core_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
-
dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma)) {
rtw89_err(rtwdev, "failed to map skb dma data\n");
@@ -983,9 +1169,8 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
tx_data->dma = dma;
- txaddr_info_len = sizeof(*txaddr_info);
txwp_len = sizeof(*txwp_info);
- txwd_len = sizeof(*txwd_body);
+ txwd_len = chip->txwd_body_size;
txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
txwp_info = txwd->vaddr + txwd_len;
@@ -995,14 +1180,15 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
txwp_info->seq3 = 0;
tx_ring->tx_cnt++;
- txaddr_info = txwd->vaddr + txwd_len + txwp_len;
- txaddr_info->length = cpu_to_le16(skb->len);
- txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
- RTW89_PCI_ADDR_NUM(1));
- txaddr_info->dma = cpu_to_le32(dma);
+ txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
+ txaddr_info_len =
+ rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
+ dma, &desc_info->addr_info_nr);
txwd->len = txwd_len + txwp_len + txaddr_info_len;
+ rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
+
skb_queue_tail(&txwd->queue, skb);
return 0;
@@ -1017,16 +1203,18 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
- struct rtw89_txwd_body *txwd_body;
+ void *txdesc;
+ int txdesc_size = chip->h2c_desc_size;
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
dma_addr_t dma;
- txwd_body = (struct rtw89_txwd_body *)skb_push(skb, sizeof(*txwd_body));
- memset(txwd_body, 0, sizeof(*txwd_body));
- rtw89_core_fill_txdesc(rtwdev, desc_info, txwd_body);
+ txdesc = skb_push(skb, txdesc_size);
+ memset(txdesc, 0, txdesc_size);
+ rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma)) {
@@ -1235,36 +1423,102 @@ static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
spin_unlock_bh(&rtwpci->trx_lock);
}
-static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
+static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
unsigned long flags;
- rtw89_core_napi_start(rtwdev);
-
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtwpci->running = true;
- rtw89_pci_enable_intr(rtwdev, rtwpci);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
-
- return 0;
}
-static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
+static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct pci_dev *pdev = rtwpci->pdev;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtwpci->running = false;
- rtw89_pci_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
+{
+ rtw89_core_napi_start(rtwdev);
+ rtw89_pci_enable_intr_lock(rtwdev);
+
+ return 0;
+}
+
+static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+
+ rtw89_pci_disable_intr_lock(rtwdev);
synchronize_irq(pdev->irq);
rtw89_core_napi_stop(rtwdev);
}
+static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+
+ if (pause) {
+ rtw89_pci_disable_intr_lock(rtwdev);
+ synchronize_irq(pdev->irq);
+ if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
+ napi_synchronize(&rtwdev->napi);
+ } else {
+ rtw89_pci_enable_intr_lock(rtwdev);
+ rtw89_pci_tx_kick_off_pending(rtwdev);
+ }
+}
+
+static
+void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
+ const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
+ struct rtw89_pci_tx_ring *tx_ring;
+ struct rtw89_pci_rx_ring *rx_ring;
+ int i;
+
+ if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
+ return;
+
+ for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ tx_ring->bd_ring.addr.idx = low_power ?
+ bd_idx_addr->tx_bd_addrs[i] :
+ dma_addr_set->tx[i].idx;
+ }
+
+ for (i = 0; i < RTW89_RXCH_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ rx_ring->bd_ring.addr.idx = low_power ?
+ bd_idx_addr->rx_bd_addrs[i] :
+ dma_addr_set->rx[i].idx;
+ }
+}
+
+static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
+{
+ enum rtw89_pci_intr_mask_cfg cfg;
+
+ WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
+
+ cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
+ rtw89_chip_config_intr_mask(rtwdev, cfg);
+ rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
+}
+
static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
@@ -1348,16 +1602,30 @@ static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 txhci_en = info->txhci_en_bit;
+ u32 rxhci_en = info->rxhci_en_bit;
+
if (enable) {
+ if (chip_id != RTL8852C)
+ rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
+ B_AX_STOP_PCIEIO);
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_TXHCI_EN | B_AX_RXHCI_EN);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
- B_AX_STOP_PCIEIO);
+ txhci_en | rxhci_en);
+ if (chip_id == RTL8852C)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_STOP_AXI_MST);
} else {
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1,
- B_AX_STOP_PCIEIO);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_TXHCI_EN | B_AX_RXHCI_EN);
+ if (chip_id != RTL8852C)
+ rtw89_write32_set(rtwdev, info->dma_stop1_reg,
+ B_AX_STOP_PCIEIO);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_STOP_AXI_MST);
+ if (chip_id == RTL8852C)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_STOP_AXI_MST);
}
}
@@ -1422,6 +1690,28 @@ rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
return 0;
}
+static int
+rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
+{
+ u32 shift;
+ int ret;
+ u16 val;
+
+ ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
+ if (ret)
+ return ret;
+
+ shift = __ffs(mask);
+ val &= ~mask;
+ val |= ((data << shift) & mask);
+
+ ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
int ret;
@@ -1550,8 +1840,7 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
bool l1_flag = false;
int ret = 0;
- if ((rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) ||
- rtwdev->chip->chip_id == RTL8852C)
+ if (rtwdev->chip->chip_id != RTL8852B)
return 0;
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
@@ -1696,31 +1985,39 @@ end:
static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (rtwdev->chip->chip_id != RTL8852A)
- return 0;
-
- ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
- PCIE_PHY_GEN1);
- if (ret)
- return ret;
- ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
- PCIE_PHY_GEN2);
- if (ret)
- return ret;
+ if (chip_id == RTL8852A) {
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
+ PCIE_PHY_GEN1);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
+ PCIE_PHY_GEN2);
+ if (ret)
+ return ret;
+ } else if (chip_id == RTL8852C) {
+ rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
+ B_AX_DEGLITCH);
+ rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
+ B_AX_DEGLITCH);
+ }
return 0;
}
static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
{
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return;
+
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
}
static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
{
- if (rtwdev->chip->chip_id == RTL8852C)
+ if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B)
return;
rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
@@ -1730,7 +2027,7 @@ static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
{
int ret;
- if (rtwdev->chip->chip_id == RTL8852C)
+ if (rtwdev->chip->chip_id != RTL8852A)
return 0;
ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
@@ -1756,13 +2053,78 @@ static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
{
- if (rtwdev->chip->chip_id != RTL8852A)
+ if (rtwdev->chip->chip_id == RTL8852A ||
+ rtwdev->chip->chip_id == RTL8852B) {
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_WLSUS_AFT_PDN);
+ } else if (rtwdev->chip->chip_id == RTL8852C) {
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ }
+}
+
+static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852B)
+ return 0;
+
+ return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
+ PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
+}
+
+static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+{
+ if (pwr_up)
+ rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
+}
+
+static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852C)
return;
- rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_WLSUS_AFT_PDN);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
+}
+
+static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
+{
+ if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
+}
+
+static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
+{
+ if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
+ B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
+ B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+}
+
+static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
+}
+
+static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
}
static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
@@ -1774,6 +2136,52 @@ static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
B_AX_SIC_EN_FORCE_CLKREQ);
}
+static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 lbc;
+
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
+ if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
+ lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
+ lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
+ rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
+ } else {
+ lbc &= ~B_AX_LBC_EN;
+ }
+ rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
+}
+
+static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 val32;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
+ val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
+ info->io_rcy_tmr);
+ rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
+ rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
+ rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
+ } else {
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
+ }
+
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
+}
+
static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
{
if (rtwdev->chip->chip_id == RTL8852C)
@@ -1787,30 +2195,197 @@ static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
B_AX_EN_CHKDSC_NO_RX_STUCK);
}
+static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
+}
+
static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
B_AX_CLR_CH12_IDX;
+ u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
+ u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
- if (rtwdev->chip->chip_id == RTL8852A)
+ if (chip_id == RTL8852A || chip_id == RTL8852C)
val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
/* clear DMA indexes */
rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
- if (rtwdev->chip->chip_id == RTL8852A)
- rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR2,
+ if (chip_id == RTL8852A || chip_id == RTL8852C)
+ rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
- rtw89_write32_set(rtwdev, R_AX_RXBD_RWPTR_CLR,
+ rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
}
+static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 ret, check, dma_busy;
+ u32 dma_busy1 = info->dma_busy1_reg;
+ u32 dma_busy2 = info->dma_busy2_reg;
+
+ check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY |
+ B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY |
+ B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY |
+ B_AX_CH9_BUSY | B_AX_CH12_BUSY;
+
+ ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
+ 10, 100, false, rtwdev, dma_busy1);
+ if (ret)
+ return ret;
+
+ check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
+
+ ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
+ 10, 100, false, rtwdev, dma_busy2);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 ret, check, dma_busy;
+ u32 dma_busy3 = info->dma_busy3_reg;
+
+ check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
+
+ ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
+ 10, 100, false, rtwdev, dma_busy3);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
+{
+ u32 ret;
+
+ ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "txdma ch busy\n");
+ return ret;
+ }
+
+ ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "rxdma ch busy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
+ enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
+ enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
+ enum mac_ax_tag_mode tag_mode = info->tag_mode;
+ enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
+ enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
+ enum mac_ax_tx_burst tx_burst = info->tx_burst;
+ enum mac_ax_rx_burst rx_burst = info->rx_burst;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u8 cv = rtwdev->hal.cv;
+ u32 val32;
+
+ if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
+ if (chip_id == RTL8852A && cv == CHIP_CBV)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
+ } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
+ }
+
+ if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
+ if (chip_id == RTL8852A && cv == CHIP_CBV)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
+ } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
+ }
+
+ if (rxbd_mode == MAC_AX_RXBD_PKT) {
+ rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
+ } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
+ rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
+ B_AX_PCIE_RX_APPLEN_MASK, 0);
+ }
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
+ } else if (chip_id == RTL8852C) {
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
+ }
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (tag_mode == MAC_AX_TAG_SGL) {
+ val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
+ ~B_AX_LATENCY_CONTROL;
+ rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
+ } else if (tag_mode == MAC_AX_TAG_MULTI) {
+ val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
+ B_AX_LATENCY_CONTROL;
+ rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
+ }
+ }
+
+ rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
+ info->multi_tag_num);
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
+ wd_dma_idle_intvl);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
+ wd_dma_act_intvl);
+ } else if (chip_id == RTL8852C) {
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
+ wd_dma_idle_intvl);
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
+ wd_dma_act_intvl);
+ }
+
+ if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
+ rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
+ B_AX_HOST_ADDR_INFO_8B_SEL);
+ rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
+ rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
+ B_AX_HOST_ADDR_INFO_8B_SEL);
+ rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ }
+
+ return 0;
+}
+
static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
if (rtwdev->chip->chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
}
+ info->ltr_set(rtwdev, false);
rtw89_pci_ctrl_dma_all(rtwdev, false);
rtw89_pci_clr_idx_all(rtwdev);
@@ -1819,9 +2394,7 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
{
- u32 dma_busy;
- u32 check;
- u32 lbc;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
int ret;
rtw89_pci_rxdma_prefth(rtwdev);
@@ -1835,6 +2408,7 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
rtw89_pci_aphy_pwrcut(rtwdev);
rtw89_pci_hci_ldo(rtwdev);
+ rtw89_pci_dphy_delay(rtwdev);
ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
if (ret) {
@@ -1842,50 +2416,31 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
return ret;
}
+ rtw89_pci_power_wake(rtwdev, true);
+ rtw89_pci_autoload_hang(rtwdev);
+ rtw89_pci_l12_vmain(rtwdev);
+ rtw89_pci_gen2_force_ib(rtwdev);
+ rtw89_pci_l1_ent_lat(rtwdev);
+ rtw89_pci_wd_exit_l1(rtwdev);
rtw89_pci_set_sic(rtwdev);
+ rtw89_pci_set_lbc(rtwdev);
+ rtw89_pci_set_io_rcy(rtwdev);
rtw89_pci_set_dbg(rtwdev);
+ rtw89_pci_set_keep_reg(rtwdev);
- if (rtwdev->chip->chip_id == RTL8852A)
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_AUXCLK_GATE);
-
- lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
- lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER);
- lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
- rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
-
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_WPDMA);
+ rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA);
/* stop DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, false);
- /* check PCI at idle state */
- check = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
- ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
- 100, 3000, false, rtwdev, R_AX_PCIE_DMA_BUSY1);
+ ret = rtw89_pci_poll_dma_all_idle(rtwdev);
if (ret) {
- rtw89_err(rtwdev, "failed to poll io busy\n");
+ rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
return ret;
}
rtw89_pci_clr_idx_all(rtwdev);
-
- /* configure TX/RX op modes */
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE |
- B_AX_RX_TRUNC_MODE);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RXBD_MODE);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, 7);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, 3);
- /* multi-tag mode */
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_LATENCY_CONTROL);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, B_AX_MAX_TAG_NUM,
- RTW89_MAC_TAG_NUM_8);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
- RTW89_MAC_WD_DMA_INTVL_256NS);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
- RTW89_MAC_WD_DMA_INTVL_256NS);
+ rtw89_pci_mode_op(rtwdev);
/* fill TRX BD indexes */
rtw89_pci_ops_reset(rtwdev);
@@ -1897,9 +2452,9 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
}
/* enable FW CMD queue to download firmware */
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_CH12);
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
+ rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
+ rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12);
+ rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -1907,10 +2462,13 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev)
+int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
{
u32 val;
+ if (!en)
+ return 0;
+
val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
if (rtw89_pci_ltr_is_err_reg_val(val))
return -EINVAL;
@@ -1937,31 +2495,83 @@ static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev)
return 0;
}
+EXPORT_SYMBOL(rtw89_pci_ltr_set);
+
+int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
+{
+ u32 dec_ctrl;
+ u32 val32;
+
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+ dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
+ if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
+ return -EINVAL;
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+
+ if (!en) {
+ dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
+ dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
+ B_AX_LTR_REQ_DRV;
+ } else {
+ dec_ctrl |= B_AX_LTR_HW_DEC_EN;
+ }
+
+ dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
+ dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
+
+ if (en)
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
+ B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
+ PCI_LTR_IDLE_TIMER_3_2MS);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
+ rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
+ rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
+ rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- ret = rtw89_pci_ltr_set(rtwdev);
+ ret = info->ltr_set(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "pci ltr set fail\n");
return ret;
}
- if (rtwdev->chip->chip_id == RTL8852A) {
+ if (chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
}
- /* ADDR info 8-byte mode */
- rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
- B_AX_HOST_ADDR_INFO_8B_SEL);
- rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ /* ADDR info 8-byte mode */
+ rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
+ B_AX_HOST_ADDR_INFO_8B_SEL);
+ rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ }
/* enable DMA for all queues */
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
+ rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
+ rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
/* Release PCI IO */
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
+ rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
return 0;
@@ -2490,23 +3100,82 @@ static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
skb_queue_len(&rtwpci->h2c_queue), true);
}
-static void rtw89_pci_default_intr_mask(struct rtw89_dev *rtwdev)
+void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
+
+ if (rtwpci->under_recovery) {
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = 0;
+ } else {
+ rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
+ B_AX_RXDMA_INT_EN |
+ B_AX_RXP1DMA_INT_EN |
+ B_AX_RPQDMA_INT_EN |
+ B_AX_RXDMA_STUCK_INT_EN |
+ B_AX_RDU_INT_EN |
+ B_AX_RPQBD_FULL_INT_EN |
+ B_AX_HS0ISR_IND_INT_EN;
+
+ rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
+ }
+}
+EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
+
+static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = 0;
+}
+
+static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
+ B_AX_HS1ISR_IND_INT_EN |
+ B_AX_HS0ISR_IND_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
B_AX_RXDMA_INT_EN |
B_AX_RXP1DMA_INT_EN |
B_AX_RPQDMA_INT_EN |
B_AX_RXDMA_STUCK_INT_EN |
B_AX_RDU_INT_EN |
- B_AX_RPQBD_FULL_INT_EN |
- B_AX_HS0ISR_IND_INT_EN;
+ B_AX_RPQBD_FULL_INT_EN;
+ rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
+}
- rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
+static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
+ B_AX_HS0ISR_IND_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
}
+void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (rtwpci->under_recovery)
+ rtw89_pci_recovery_intr_mask_v1(rtwdev);
+ else if (rtwpci->low_power)
+ rtw89_pci_low_power_intr_mask_v1(rtwdev);
+ else
+ rtw89_pci_default_intr_mask_v1(rtwdev);
+}
+EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
+
static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@@ -2529,7 +3198,7 @@ static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
goto err_free_vector;
}
- rtw89_pci_default_intr_mask(rtwdev);
+ rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
return 0;
@@ -2689,17 +3358,18 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 val32;
if (en == MAC_AX_FUNC_EN) {
val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
+ rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32);
val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
} else {
val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
+ rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32);
val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
@@ -2850,7 +3520,7 @@ static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&rtwpci->irq_lock, flags);
if (likely(rtwpci->running))
- rtw89_pci_enable_intr(rtwdev, rtwpci);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
@@ -2914,6 +3584,8 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.reset = rtw89_pci_ops_reset,
.start = rtw89_pci_ops_start,
.stop = rtw89_pci_ops_stop,
+ .pause = rtw89_pci_ops_pause,
+ .switch_mode = rtw89_pci_ops_switch_mode,
.recalc_int_mit = rtw89_pci_recalc_int_mit,
.read8 = rtw89_pci_ops_read8,
@@ -2931,6 +3603,9 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
.dump_err_status = rtw89_pci_ops_dump_err_status,
.napi_poll = rtw89_pci_napi_poll,
+
+ .recovery_start = rtw89_pci_ops_recovery_start,
+ .recovery_complete = rtw89_pci_ops_recovery_complete,
};
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2938,6 +3613,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct ieee80211_hw *hw;
struct rtw89_dev *rtwdev;
const struct rtw89_driver_info *info;
+ const struct rtw89_pci_info *pci_info;
int driver_data_size;
int ret;
@@ -2948,20 +3624,21 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
}
+ info = (const struct rtw89_driver_info *)id->driver_data;
+ pci_info = info->bus.pci;
+
rtwdev = hw->priv;
rtwdev->hw = hw;
rtwdev->dev = &pdev->dev;
+ rtwdev->chip = info->chip;
+ rtwdev->pci_info = info->bus.pci;
rtwdev->hci.ops = &rtw89_pci_ops;
rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
- rtwdev->hci.rpwm_addr = R_AX_PCIE_HRPWM;
- rtwdev->hci.cpwm_addr = R_AX_CPWM;
+ rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
+ rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
- info = (const struct rtw89_driver_info *)id->driver_data;
- rtwdev->chip = info->chip;
- rtwdev->pci_info = info->bus.pci;
-
ret = rtw89_core_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to initialise core\n");
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index b84acd0d0582..bb585ed19190 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -12,6 +12,9 @@
#define MDIO_PG0_G2 2
#define MDIO_PG1_G2 3
#define RAC_ANA10 0x10
+#define RAC_REG_REV2 0x1B
+#define BAC_CMU_EN_DLY_MASK GENMASK(15, 12)
+#define PCIE_DPHY_DLY_25US 0x1
#define RAC_ANA19 0x19
#define RAC_ANA1F 0x1F
#define RAC_ANA24 0x24
@@ -35,6 +38,58 @@
#define R_AX_MDIO_WDATA 0x10A4
#define R_AX_MDIO_RDATA 0x10A6
+#define R_AX_PCIE_PS_CTRL_V1 0x3008
+#define B_AX_CMAC_EXIT_L1_EN BIT(7)
+#define B_AX_DMAC0_EXIT_L1_EN BIT(6)
+#define B_AX_SEL_XFER_PENDING BIT(3)
+#define B_AX_SEL_REQ_ENTR_L1 BIT(2)
+#define B_AX_SEL_REQ_EXIT_L1 BIT(0)
+
+#define R_AX_PCIE_BG_CLR 0x303C
+#define B_AX_BG_CLR_ASYNC_M3 BIT(4)
+
+#define R_AX_PCIE_IO_RCY_M1 0x3100
+#define B_AX_PCIE_IO_RCY_P_M1 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_P_M1 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_M1 BIT(3)
+#define B_AX_PCIE_IO_RCY_TRIG_M1 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_M1 0x3104
+#define B_AX_PCIE_WDT_TIMER_M1_MASK GENMASK(31, 0)
+
+#define R_AX_PCIE_IO_RCY_M2 0x310C
+#define B_AX_PCIE_IO_RCY_P_M2 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_P_M2 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_M2 BIT(3)
+#define B_AX_PCIE_IO_RCY_TRIG_M2 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_M2 0x3110
+#define B_AX_PCIE_WDT_TIMER_M2_MASK GENMASK(31, 0)
+
+#define R_AX_PCIE_IO_RCY_E0 0x3118
+#define B_AX_PCIE_IO_RCY_P_E0 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_P_E0 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_E0 BIT(3)
+#define B_AX_PCIE_IO_RCY_TRIG_E0 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_E0 0x311C
+#define B_AX_PCIE_WDT_TIMER_E0_MASK GENMASK(31, 0)
+
+#define R_AX_PCIE_IO_RCY_S1 0x3124
+#define B_AX_PCIE_IO_RCY_RP_S1 BIT(7)
+#define B_AX_PCIE_IO_RCY_WP_S1 BIT(6)
+#define B_AX_PCIE_IO_RCY_WDT_RP_S1 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_WP_S1 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_S1 BIT(3)
+#define B_AX_PCIE_IO_RCY_RTRIG_S1 BIT(1)
+#define B_AX_PCIE_IO_RCY_WTRIG_S1 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_S1 0x3128
+#define B_AX_PCIE_WDT_TIMER_S1_MASK GENMASK(31, 0)
+
+#define R_RAC_DIRECT_OFFSET_G1 0x3800
+#define R_RAC_DIRECT_OFFSET_G2 0x3880
+
#define RTW89_PCI_WR_RETRY_CNT 20
/* Interrupts */
@@ -42,6 +97,16 @@
#define B_AX_HALT_C2H_INT_EN BIT(21)
#define R_AX_HISR0 0x01A4
+#define R_AX_HIMR1 0x01A8
+#define B_AX_GPIO18_INT_EN BIT(2)
+#define B_AX_GPIO17_INT_EN BIT(1)
+#define B_AX_GPIO16_INT_EN BIT(0)
+
+#define R_AX_HISR1 0x01AC
+#define B_AX_GPIO18_INT BIT(2)
+#define B_AX_GPIO17_INT BIT(1)
+#define B_AX_GPIO16_INT BIT(0)
+
#define R_AX_MDIO_CFG 0x10A0
#define B_AX_MDIO_PHY_ADDR_MASK GENMASK(13, 12)
#define B_AX_MDIO_RFLAG BIT(9)
@@ -49,6 +114,7 @@
#define B_AX_MDIO_ADDR_MASK GENMASK(4, 0)
#define R_AX_PCIE_HIMR00 0x10B0
+#define R_AX_HAXI_HIMR00 0x10B0
#define B_AX_HC00ISR_IND_INT_EN BIT(27)
#define B_AX_HD1ISR_IND_INT_EN BIT(26)
#define B_AX_HD0ISR_IND_INT_EN BIT(25)
@@ -77,6 +143,7 @@
#define B_AX_RXDMA_INT_EN BIT(0)
#define R_AX_PCIE_HISR00 0x10B4
+#define R_AX_HAXI_HISR00 0x10B4
#define B_AX_HC00ISR_IND_INT BIT(27)
#define B_AX_HD1ISR_IND_INT BIT(26)
#define B_AX_HD0ISR_IND_INT BIT(25)
@@ -104,6 +171,10 @@
#define B_AX_RXP1DMA_INT BIT(1)
#define B_AX_RXDMA_INT BIT(0)
+#define R_AX_HAXI_HIMR10 0x11E0
+#define B_AX_TXDMA_CH11_INT_EN_V1 BIT(1)
+#define B_AX_TXDMA_CH10_INT_EN_V1 BIT(0)
+
#define R_AX_PCIE_HIMR10 0x13B0
#define B_AX_HC10ISR_IND_INT_EN BIT(28)
#define B_AX_TXDMA_CH11_INT_EN BIT(12)
@@ -114,7 +185,32 @@
#define B_AX_TXDMA_CH11_INT BIT(12)
#define B_AX_TXDMA_CH10_INT BIT(11)
+#define R_AX_PCIE_HIMR00_V1 0x30B0
+#define B_AX_HCI_AXIDMA_INT_EN BIT(29)
+#define B_AX_HC00ISR_IND_INT_EN_V1 BIT(28)
+#define B_AX_HD1ISR_IND_INT_EN_V1 BIT(27)
+#define B_AX_HD0ISR_IND_INT_EN_V1 BIT(26)
+#define B_AX_HS1ISR_IND_INT_EN BIT(25)
+#define B_AX_PCIE_DBG_STE_INT_EN BIT(13)
+
+#define R_AX_PCIE_HISR00_V1 0x30B4
+#define B_AX_HCI_AXIDMA_INT BIT(29)
+#define B_AX_HC00ISR_IND_INT_V1 BIT(28)
+#define B_AX_HD1ISR_IND_INT_V1 BIT(27)
+#define B_AX_HD0ISR_IND_INT_V1 BIT(26)
+#define B_AX_HS1ISR_IND_INT BIT(25)
+#define B_AX_PCIE_DBG_STE_INT BIT(13)
+
/* TX/RX */
+#define R_AX_DRV_FW_HSK_0 0x01B0
+#define R_AX_DRV_FW_HSK_1 0x01B4
+#define R_AX_DRV_FW_HSK_2 0x01B8
+#define R_AX_DRV_FW_HSK_3 0x01BC
+#define R_AX_DRV_FW_HSK_4 0x01C0
+#define R_AX_DRV_FW_HSK_5 0x01C4
+#define R_AX_DRV_FW_HSK_6 0x01C8
+#define R_AX_DRV_FW_HSK_7 0x01CC
+
#define R_AX_RXQ_RXBD_IDX 0x1050
#define R_AX_RPQ_RXBD_IDX 0x1054
#define R_AX_ACH0_TXBD_IDX 0x1058
@@ -321,6 +417,19 @@
#define B_AX_PCIEIO_TX_BUSY BIT(21)
#define B_AX_PCIEIO_BUSY BIT(20)
#define B_AX_WPDMA_BUSY BIT(19)
+#define B_AX_CH12_BUSY BIT(18)
+#define B_AX_CH9_BUSY BIT(17)
+#define B_AX_CH8_BUSY BIT(16)
+#define B_AX_ACH7_BUSY BIT(15)
+#define B_AX_ACH6_BUSY BIT(14)
+#define B_AX_ACH5_BUSY BIT(13)
+#define B_AX_ACH4_BUSY BIT(12)
+#define B_AX_ACH3_BUSY BIT(11)
+#define B_AX_ACH2_BUSY BIT(10)
+#define B_AX_ACH1_BUSY BIT(9)
+#define B_AX_ACH0_BUSY BIT(8)
+#define B_AX_RPQ_BUSY BIT(1)
+#define B_AX_RXQ_BUSY BIT(0)
#define R_AX_PCIE_DMA_BUSY2 0x131C
#define B_AX_CH11_BUSY BIT(1)
@@ -330,6 +439,7 @@
#define R_AX_PCIE_INIT_CFG2 0x1004
#define B_AX_WD_ITVL_IDLE GENMASK(27, 24)
#define B_AX_WD_ITVL_ACT GENMASK(19, 16)
+#define B_AX_PCIE_RX_APPLEN_MASK GENMASK(13, 0)
#define R_AX_PCIE_PS_CTRL 0x1008
#define B_AX_L1OFF_PWR_OFF_EN BIT(5)
@@ -356,11 +466,22 @@
#define B_AX_PCIE_TXBD_LEN0 BIT(1)
#define B_AX_PCIE_TXBD_4KBOUD_LENERR BIT(0)
+#define R_AX_TXBD_RWPTR_CLR2_V1 0x11C4
+#define B_AX_CLR_CH11_IDX BIT(1)
+#define B_AX_CLR_CH10_IDX BIT(0)
+
#define R_AX_LBC_WATCHDOG 0x11D8
#define B_AX_LBC_TIMER GENMASK(7, 4)
#define B_AX_LBC_FLAG BIT(1)
#define B_AX_LBC_EN BIT(0)
+#define R_AX_RXBD_RWPTR_CLR_V1 0x1200
+#define B_AX_CLR_RPQ_IDX BIT(1)
+#define B_AX_CLR_RXQ_IDX BIT(0)
+
+#define R_AX_HAXI_EXP_CTRL 0x1204
+#define B_AX_MAX_TAG_NUM_V1_MASK GENMASK(2, 0)
+
#define R_AX_PCIE_EXP_CTRL 0x13F0
#define B_AX_EN_CHKDSC_NO_RX_STUCK BIT(20)
#define B_AX_MAX_TAG_NUM GENMASK(18, 16)
@@ -369,6 +490,9 @@
#define R_AX_PCIE_RX_PREF_ADV 0x13F4
#define B_AX_RXDMA_PREF_ADV_EN BIT(0)
+#define R_AX_PCIE_HRPWM_V1 0x30C0
+#define R_AX_PCIE_CRPWM 0x30C4
+
#define RTW89_PCI_TXBD_NUM_MAX 256
#define RTW89_PCI_RXBD_NUM_MAX 256
#define RTW89_PCI_TXWD_NUM_MAX 512
@@ -433,6 +557,121 @@ enum rtw89_pcie_clkdly_hw {
PCIE_CLKDLY_HW_200US = 0x5,
};
+enum mac_ax_bd_trunc_mode {
+ MAC_AX_BD_NORM,
+ MAC_AX_BD_TRUNC,
+ MAC_AX_BD_DEF = 0xFE
+};
+
+enum mac_ax_rxbd_mode {
+ MAC_AX_RXBD_PKT,
+ MAC_AX_RXBD_SEP,
+ MAC_AX_RXBD_DEF = 0xFE
+};
+
+enum mac_ax_tag_mode {
+ MAC_AX_TAG_SGL,
+ MAC_AX_TAG_MULTI,
+ MAC_AX_TAG_DEF = 0xFE
+};
+
+enum mac_ax_tx_burst {
+ MAC_AX_TX_BURST_16B = 0,
+ MAC_AX_TX_BURST_32B = 1,
+ MAC_AX_TX_BURST_64B = 2,
+ MAC_AX_TX_BURST_V1_64B = 0,
+ MAC_AX_TX_BURST_128B = 3,
+ MAC_AX_TX_BURST_V1_128B = 1,
+ MAC_AX_TX_BURST_256B = 4,
+ MAC_AX_TX_BURST_V1_256B = 2,
+ MAC_AX_TX_BURST_512B = 5,
+ MAC_AX_TX_BURST_1024B = 6,
+ MAC_AX_TX_BURST_2048B = 7,
+ MAC_AX_TX_BURST_DEF = 0xFE
+};
+
+enum mac_ax_rx_burst {
+ MAC_AX_RX_BURST_16B = 0,
+ MAC_AX_RX_BURST_32B = 1,
+ MAC_AX_RX_BURST_64B = 2,
+ MAC_AX_RX_BURST_V1_64B = 0,
+ MAC_AX_RX_BURST_128B = 3,
+ MAC_AX_RX_BURST_V1_128B = 1,
+ MAC_AX_RX_BURST_V1_256B = 0,
+ MAC_AX_RX_BURST_DEF = 0xFE
+};
+
+enum mac_ax_wd_dma_intvl {
+ MAC_AX_WD_DMA_INTVL_0S,
+ MAC_AX_WD_DMA_INTVL_256NS,
+ MAC_AX_WD_DMA_INTVL_512NS,
+ MAC_AX_WD_DMA_INTVL_768NS,
+ MAC_AX_WD_DMA_INTVL_1US,
+ MAC_AX_WD_DMA_INTVL_1_5US,
+ MAC_AX_WD_DMA_INTVL_2US,
+ MAC_AX_WD_DMA_INTVL_4US,
+ MAC_AX_WD_DMA_INTVL_8US,
+ MAC_AX_WD_DMA_INTVL_16US,
+ MAC_AX_WD_DMA_INTVL_DEF = 0xFE
+};
+
+enum mac_ax_multi_tag_num {
+ MAC_AX_TAG_NUM_1,
+ MAC_AX_TAG_NUM_2,
+ MAC_AX_TAG_NUM_3,
+ MAC_AX_TAG_NUM_4,
+ MAC_AX_TAG_NUM_5,
+ MAC_AX_TAG_NUM_6,
+ MAC_AX_TAG_NUM_7,
+ MAC_AX_TAG_NUM_8,
+ MAC_AX_TAG_NUM_DEF = 0xFE
+};
+
+enum mac_ax_lbc_tmr {
+ MAC_AX_LBC_TMR_8US = 0,
+ MAC_AX_LBC_TMR_16US,
+ MAC_AX_LBC_TMR_32US,
+ MAC_AX_LBC_TMR_64US,
+ MAC_AX_LBC_TMR_128US,
+ MAC_AX_LBC_TMR_256US,
+ MAC_AX_LBC_TMR_512US,
+ MAC_AX_LBC_TMR_1MS,
+ MAC_AX_LBC_TMR_2MS,
+ MAC_AX_LBC_TMR_4MS,
+ MAC_AX_LBC_TMR_8MS,
+ MAC_AX_LBC_TMR_DEF = 0xFE
+};
+
+enum mac_ax_pcie_func_ctrl {
+ MAC_AX_PCIE_DISABLE = 0,
+ MAC_AX_PCIE_ENABLE = 1,
+ MAC_AX_PCIE_DEFAULT = 0xFE,
+ MAC_AX_PCIE_IGNORE = 0xFF
+};
+
+enum mac_ax_io_rcy_tmr {
+ MAC_AX_IO_RCY_ANA_TMR_2MS = 24000,
+ MAC_AX_IO_RCY_ANA_TMR_4MS = 48000,
+ MAC_AX_IO_RCY_ANA_TMR_6MS = 72000,
+ MAC_AX_IO_RCY_ANA_TMR_DEF = 0xFE
+};
+
+enum rtw89_pci_intr_mask_cfg {
+ RTW89_PCI_INTR_MASK_RESET,
+ RTW89_PCI_INTR_MASK_NORMAL,
+ RTW89_PCI_INTR_MASK_LOW_POWER,
+ RTW89_PCI_INTR_MASK_RECOVERY_START,
+ RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE,
+};
+
+struct rtw89_pci_isrs;
+struct rtw89_pci;
+
+struct rtw89_pci_bd_idx_addr {
+ u32 tx_bd_addrs[RTW89_TXCH_NUM];
+ u32 rx_bd_addrs[RTW89_RXCH_NUM];
+};
+
struct rtw89_pci_ch_dma_addr {
u32 num;
u32 idx;
@@ -447,7 +686,50 @@ struct rtw89_pci_ch_dma_addr_set {
};
struct rtw89_pci_info {
+ enum mac_ax_bd_trunc_mode txbd_trunc_mode;
+ enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
+ enum mac_ax_rxbd_mode rxbd_mode;
+ enum mac_ax_tag_mode tag_mode;
+ enum mac_ax_tx_burst tx_burst;
+ enum mac_ax_rx_burst rx_burst;
+ enum mac_ax_wd_dma_intvl wd_dma_idle_intvl;
+ enum mac_ax_wd_dma_intvl wd_dma_act_intvl;
+ enum mac_ax_multi_tag_num multi_tag_num;
+ enum mac_ax_pcie_func_ctrl lbc_en;
+ enum mac_ax_lbc_tmr lbc_tmr;
+ enum mac_ax_pcie_func_ctrl autok_en;
+ enum mac_ax_pcie_func_ctrl io_rcy_en;
+ enum mac_ax_io_rcy_tmr io_rcy_tmr;
+
+ u32 init_cfg_reg;
+ u32 txhci_en_bit;
+ u32 rxhci_en_bit;
+ u32 rxbd_mode_bit;
+ u32 exp_ctrl_reg;
+ u32 max_tag_num_mask;
+ u32 rxbd_rwptr_clr_reg;
+ u32 txbd_rwptr_clr2_reg;
+ u32 dma_stop1_reg;
+ u32 dma_stop2_reg;
+ u32 dma_busy1_reg;
+ u32 dma_busy2_reg;
+ u32 dma_busy3_reg;
+
+ u32 rpwm_addr;
+ u32 cpwm_addr;
+ const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
+
+ int (*ltr_set)(struct rtw89_dev *rtwdev, bool en);
+ u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr);
+ void (*config_intr_mask)(struct rtw89_dev *rtwdev);
+ void (*enable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+ void (*disable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+ void (*recognize_intrs)(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
};
struct rtw89_pci_bd_ram {
@@ -493,6 +775,18 @@ struct rtw89_pci_tx_addr_info_32 {
__le32 dma;
} __packed;
+#define RTW89_TXADDR_INFO_NR_V1 10
+
+struct rtw89_pci_tx_addr_info_32_v1 {
+ __le16 length_opt;
+#define B_PCIADDR_LEN_V1_MASK GENMASK(10, 0)
+#define B_PCIADDR_HIGH_SEL_V1_MASK GENMASK(14, 11)
+#define B_PCIADDR_LS_V1_MASK BIT(15)
+#define TXADDR_INFO_LENTHG_V1_MAX ALIGN_DOWN(BIT(11) - 1, 4)
+ __le16 dma_low_lsb;
+ __le16 dma_low_msb;
+} __packed;
+
#define RTW89_PCI_RPP_POLLUTED BIT(31)
#define RTW89_PCI_RPP_SEQ GENMASK(30, 16)
#define RTW89_PCI_RPP_TX_STATUS GENMASK(15, 13)
@@ -582,6 +876,7 @@ struct rtw89_pci_rx_ring {
};
struct rtw89_pci_isrs {
+ u32 ind_isrs;
u32 halt_c2h_isrs;
u32 isrs[2];
};
@@ -594,11 +889,15 @@ struct rtw89_pci {
/* protect TRX resources (exclude RXQ) */
spinlock_t trx_lock;
bool running;
+ bool low_power;
+ bool under_recovery;
struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
struct sk_buff_head h2c_queue;
struct sk_buff_head h2c_release_queue;
+ DECLARE_BITMAP(kick_map, RTW89_TXCH_NUM);
+ u32 ind_intrs;
u32 halt_c2h_intrs;
u32 intrs[2];
void __iomem *mmap;
@@ -697,5 +996,95 @@ struct pci_device_id;
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
void rtw89_pci_remove(struct pci_dev *pdev);
+int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en);
+int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en);
+u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr);
+u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr);
+void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev);
+void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev);
+void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
+void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
+
+static inline
+u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ return info->fill_txaddr_info(rtwdev, txaddr_info_addr, total_len,
+ dma, add_info_nr);
+}
+
+static inline void rtw89_chip_config_intr_mask(struct rtw89_dev *rtwdev,
+ enum rtw89_pci_intr_mask_cfg cfg)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ switch (cfg) {
+ default:
+ case RTW89_PCI_INTR_MASK_RESET:
+ rtwpci->low_power = false;
+ rtwpci->under_recovery = false;
+ break;
+ case RTW89_PCI_INTR_MASK_NORMAL:
+ rtwpci->low_power = false;
+ break;
+ case RTW89_PCI_INTR_MASK_LOW_POWER:
+ rtwpci->low_power = true;
+ break;
+ case RTW89_PCI_INTR_MASK_RECOVERY_START:
+ rtwpci->under_recovery = true;
+ break;
+ case RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE:
+ rtwpci->under_recovery = false;
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_HCI,
+ "Configure PCI interrupt mask mode low_power=%d under_recovery=%d\n",
+ rtwpci->low_power, rtwpci->under_recovery);
+
+ info->config_intr_mask(rtwdev);
+}
+
+static inline
+void rtw89_chip_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ info->enable_intr(rtwdev, rtwpci);
+}
+
+static inline
+void rtw89_chip_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ info->disable_intr(rtwdev, rtwpci);
+}
+
+static inline
+void rtw89_chip_recognize_intrs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ info->recognize_intrs(rtwdev, rtwpci, isrs);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index ac211d897311..33494e8451cf 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -76,10 +76,10 @@ static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
static u64 get_he_ra_mask(struct ieee80211_sta *sta)
{
- struct ieee80211_sta_he_cap cap = sta->he_cap;
+ struct ieee80211_sta_he_cap cap = sta->deflink.he_cap;
u16 mcs_map;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
@@ -172,17 +172,17 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
return -1;
}
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
RA_MASK_HE_1SS_RATES);
cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
RA_MASK_HE_2SS_RATES);
- } else if (sta->vht_cap.vht_supported) {
+ } else if (sta->deflink.vht_cap.vht_supported) {
cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
RA_MASK_VHT_1SS_RATES);
cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
RA_MASK_VHT_2SS_RATES);
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
RA_MASK_HT_1SS_RATES);
cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
@@ -223,57 +223,57 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
memset(ra, 0, sizeof(*ra));
/* Set the ra mask from sta's capability */
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
mode |= RTW89_RA_MODE_HE;
csi_mode = RTW89_RA_RPT_MODE_HE;
ra_mask |= get_he_ra_mask(sta);
high_rate_masks = rtw89_ra_mask_he_rates;
- if (sta->he_cap.he_cap_elem.phy_cap_info[2] &
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
stbc_en = 1;
- if (sta->he_cap.he_cap_elem.phy_cap_info[1] &
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
- } else if (sta->vht_cap.vht_supported) {
- u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
mode |= RTW89_RA_MODE_VHT;
csi_mode = RTW89_RA_RPT_MODE_VHT;
/* MCS9, MCS8, MCS7 */
ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
high_rate_masks = rtw89_ra_mask_vht_rates;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = 1;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = 1;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
mode |= RTW89_RA_MODE_HT;
csi_mode = RTW89_RA_RPT_MODE_HT;
- ra_mask |= ((u64)sta->ht_cap.mcs.rx_mask[3] << 48) |
- ((u64)sta->ht_cap.mcs.rx_mask[2] << 36) |
- (sta->ht_cap.mcs.rx_mask[1] << 24) |
- (sta->ht_cap.mcs.rx_mask[0] << 12);
+ ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
high_rate_masks = rtw89_ra_mask_ht_rates;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = 1;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
ldpc_en = 1;
}
switch (rtwdev->hal.current_band_type) {
case RTW89_BAND_2G:
- ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
- if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf)
+ ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
+ if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] <= 0xf)
mode |= RTW89_RA_MODE_CCK;
else
mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
break;
case RTW89_BAND_5G:
- ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
break;
case RTW89_BAND_6G:
- ra_mask |= (u64)sta->supp_rates[NL80211_BAND_6GHZ] << 4;
+ ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
break;
default:
@@ -302,30 +302,30 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
bw_mode = RTW89_CHANNEL_WIDTH_160;
- sgi = sta->vht_cap.vht_supported &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+ sgi = sta->deflink.vht_cap.vht_supported &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
break;
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW89_CHANNEL_WIDTH_80;
- sgi = sta->vht_cap.vht_supported &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ sgi = sta->deflink.vht_cap.vht_supported &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
break;
case IEEE80211_STA_RX_BW_40:
bw_mode = RTW89_CHANNEL_WIDTH_40;
- sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
break;
default:
bw_mode = RTW89_CHANNEL_WIDTH_20;
- sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
break;
}
- if (sta->he_cap.he_cap_elem.phy_cap_info[3] &
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
ra->dcm_cap = 1;
@@ -340,7 +340,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->macid = rtwsta->mac_id;
ra->stbc_cap = stbc_en;
ra->ldpc_cap = ldpc_en;
- ra->ss_num = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
+ ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
ra->en_sgi = sgi;
ra->ra_mask = ra_mask;
@@ -790,6 +790,245 @@ static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
rtw89_phy_write32(rtwdev, reg->addr, reg->data);
}
+union rtw89_phy_bb_gain_arg {
+ u32 addr;
+ struct {
+ union {
+ u8 type;
+ struct {
+ u8 rxsc_start:4;
+ u8 bw:4;
+ };
+ };
+ u8 path;
+ u8 gain_band;
+ u8 cfg_type;
+ };
+} __packed;
+
+static void
+rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 type = arg.type;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain[gband][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain[gband][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 2; i++, data >>= 8)
+ gain->tia_gain[gband][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain error {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+enum rtw89_phy_bb_rxsc_start_idx {
+ RTW89_BB_RXSC_START_IDX_FULL = 0,
+ RTW89_BB_RXSC_START_IDX_20 = 1,
+ RTW89_BB_RXSC_START_IDX_20_1 = 5,
+ RTW89_BB_RXSC_START_IDX_40 = 9,
+ RTW89_BB_RXSC_START_IDX_80 = 13,
+};
+
+static void
+rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 rxsc_start = arg.rxsc_start;
+ u8 bw = arg.bw;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ u8 rxsc;
+ s8 ofst;
+ int i;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ gain->rpl_ofst_20[gband][path] = (s8)data;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
+ gain->rpl_ofst_40[gband][path][0] = (s8)data;
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
+ for (i = 0; i < 2; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_40[gband][path][rxsc] = ofst;
+ }
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
+ gain->rpl_ofst_80[gband][path][0] = (s8)data;
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_80[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
+ for (i = 0; i < 2; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_80[gband][path][rxsc] = ofst;
+ }
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
+ gain->rpl_ofst_160[gband][path][0] = (s8)data;
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
+ for (i = 0; i < 2; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ }
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
+ arg.addr, data, bw);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 type = arg.type;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain_bypass[gband][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain_bypass[gband][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 type = arg.type;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ case 3:
+ for (i = 4; i < 8; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
+
+ if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
+ return;
+
+ if (arg.path >= chip->rf_path_num)
+ return;
+
+ if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
+ rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
+ return;
+ }
+
+ switch (arg.cfg_type) {
+ case 0:
+ rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
+ break;
+ case 1:
+ rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
+ break;
+ case 2:
+ rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
+ break;
+ case 3:
+ rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
+ arg.addr, reg->data, arg.cfg_type);
+ break;
+ }
+}
+
static void
rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
@@ -1033,9 +1272,13 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_phy_table *bb_table = chip->bb_table;
+ const struct rtw89_phy_table *bb_gain_table = chip->bb_gain_table;
rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
+ if (bb_gain_table)
+ rtw89_phy_init_reg(rtwdev, bb_gain_table,
+ rtw89_phy_config_bb_gain, NULL);
rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}
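
The rtw89_phy_config_bb_gain() parser hooked into rtw89_phy_init_bb_reg() above treats each BB gain table entry's 32-bit address as a packed descriptor: union rtw89_phy_bb_gain_arg overlays the type byte (or the rxsc_start/bw nibbles), the RF path, the gain band and the cfg_type on the four address bytes, and cfg_type then selects one of the handlers. Below is a minimal userspace sketch of that decode, assuming the little-endian layout the __packed union relies on; struct bb_gain_arg and decode_bb_gain_addr() are illustrative names, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the packed BB gain table word: byte 0 carries the
 * type (or rxsc_start in the low nibble / bw in the high nibble), byte 1 the
 * RF path, byte 2 the gain band and byte 3 the cfg_type, mirroring the layout
 * of union rtw89_phy_bb_gain_arg in the hunk above (little-endian assumed).
 */
struct bb_gain_arg {
	uint8_t type;		/* or rxsc_start (low nibble) / bw (high nibble) */
	uint8_t path;
	uint8_t gain_band;
	uint8_t cfg_type;
};

static struct bb_gain_arg decode_bb_gain_addr(uint32_t addr)
{
	struct bb_gain_arg arg = {
		.type      = addr & 0xff,
		.path      = (addr >> 8) & 0xff,
		.gain_band = (addr >> 16) & 0xff,
		.cfg_type  = (addr >> 24) & 0xff,
	};
	return arg;
}

int main(void)
{
	struct bb_gain_arg arg = decode_bb_gain_addr(0x03010200);

	printf("cfg_type=%u gain_band=%u path=%u type=%u rxsc_start=%u bw=%u\n",
	       arg.cfg_type, arg.gain_band, arg.path, arg.type,
	       arg.type & 0xf, arg.type >> 4);
	return 0;
}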
@@ -1686,7 +1929,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
break;
case RTW89_RA_RPT_MODE_HT:
ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
- if (rtwdev->fw.old_ht_ra_format)
+ if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
else
@@ -2930,6 +3173,9 @@ static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
u32 tmp;
u8 i;
+ if (!rtwdev->hal.support_igi)
+ return;
+
tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
B_PATH0_IB_PKPW_MSK);
dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
@@ -3180,6 +3426,24 @@ static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
+static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+
+ if (!rtwdev->hal.support_igi)
+ return;
+
+ if (dig->force_gaincode_idx_en) {
+ rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "Force gaincode index enabled.\n");
+ } else {
+ rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
+ &dig->cur_gaincode);
+ rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
+ }
+}
+
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
bool enable)
{
@@ -3294,15 +3558,7 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
- if (dig->force_gaincode_idx_en) {
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
- rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "Force gaincode index enabled.\n");
- } else {
- rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
- &dig->cur_gaincode);
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
- }
+ rtw89_phy_dig_config_igi(rtwdev);
rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);
@@ -3336,6 +3592,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_load_txpwr_table(rtwdev, chip->byr_table);
rtw89_chip_set_txpwr_ctrl(rtwdev);
rtw89_chip_power_trim(rtwdev);
+ rtw89_chip_cfg_txrx_path(rtwdev);
}
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
@@ -3407,3 +3664,109 @@ rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
_rfk_handler[p->flag](rtwdev, p);
}
EXPORT_SYMBOL(rtw89_rfk_parser);
+
+#define RTW89_TSSI_FAST_MODE_NUM 4
+
+static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
+ {0xD934, 0xff0000},
+ {0xD934, 0xff000000},
+ {0xD938, 0xff},
+ {0xD934, 0xff00},
+};
+
+static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
+ {0xD930, 0xff0000},
+ {0xD930, 0xff000000},
+ {0xD934, 0xff},
+ {0xD930, 0xff00},
+};
+
+static
+void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_tssi_bandedge_cfg bandedge_cfg,
+ u32 val)
+{
+ const struct rtw89_reg_def *regs;
+ u32 reg;
+ int i;
+
+ if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
+ regs = rtw89_tssi_fastmode_regs_flat;
+ else
+ regs = rtw89_tssi_fastmode_regs_level;
+
+ for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
+ reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
+ }
+}
+
+static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
+ {0xD91C, 0xff000000},
+ {0xD920, 0xff},
+ {0xD920, 0xff00},
+ {0xD920, 0xff0000},
+ {0xD920, 0xff000000},
+ {0xD924, 0xff},
+ {0xD924, 0xff00},
+ {0xD914, 0xff000000},
+ {0xD918, 0xff},
+ {0xD918, 0xff00},
+ {0xD918, 0xff0000},
+ {0xD918, 0xff000000},
+ {0xD91C, 0xff},
+ {0xD91C, 0xff00},
+ {0xD91C, 0xff0000},
+};
+
+static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
+ {0xD910, 0xff},
+ {0xD910, 0xff00},
+ {0xD910, 0xff0000},
+ {0xD910, 0xff000000},
+ {0xD914, 0xff},
+ {0xD914, 0xff00},
+ {0xD914, 0xff0000},
+ {0xD908, 0xff},
+ {0xD908, 0xff00},
+ {0xD908, 0xff0000},
+ {0xD908, 0xff000000},
+ {0xD90C, 0xff},
+ {0xD90C, 0xff00},
+ {0xD90C, 0xff0000},
+ {0xD90C, 0xff000000},
+};
+
+void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_tssi_bandedge_cfg bandedge_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_reg_def *regs;
+ const u32 *data;
+ u32 reg;
+ int i;
+
+ if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
+ return;
+
+ if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
+ regs = rtw89_tssi_bandedge_regs_flat;
+ else
+ regs = rtw89_tssi_bandedge_regs_level;
+
+ data = chip->tssi_dbw_table->data[bandedge_cfg];
+
+ for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
+ reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_BANDEDGE_CFG, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);
+
+ rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
+ data[RTW89_TSSI_SBW20]);
+}
+EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);
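
rtw89_phy_tssi_ctrl_set_bandedge_cfg() above walks a per-chip table indexed by bandedge configuration and sub-bandwidth, writing one byte-wide register field per entry via rtw89_mac_reg_by_idx() and reusing the SBW20 value for the fast-mode registers. The following self-contained sketch models that table-driven masked-write pattern; the regs[] array, write32_mask() and struct reg_def are stand-ins for the real MMIO path, and only the first few (addr, mask) pairs of the flat table are kept for brevity.

#include <stdint.h>
#include <stdio.h>

/* Stand-in register space and (addr, mask) descriptors; the driver goes
 * through rtw89_write32_mask() on real hardware, this only models the loop. */
static uint32_t regs[0x10000 / 4];

struct reg_def {
	uint32_t addr;
	uint32_t mask;
};

static void write32_mask(uint32_t addr, uint32_t mask, uint32_t val)
{
	unsigned int shift = __builtin_ctz(mask);	/* lowest set bit of the field */
	uint32_t cur = regs[addr / 4];

	regs[addr / 4] = (cur & ~mask) | ((val << shift) & mask);
}

#define SBW_NUM 4	/* trimmed to SBW20/SBW40_0/SBW40_1/SBW80_0 for the example */

static const struct reg_def bandedge_regs[SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0x000000ff},
	{0xD920, 0x0000ff00},
	{0xD920, 0x00ff0000},
};

int main(void)
{
	/* one row of a data[cfg][sbw] table, e.g. the FLAT configuration */
	static const uint32_t data[SBW_NUM] = {0x12, 0x34, 0x56, 0x78};
	int i;

	for (i = 0; i < SBW_NUM; i++)
		write32_mask(bandedge_regs[i].addr, bandedge_regs[i].mask, data[i]);

	printf("0xD920 = 0x%08x\n", regs[0xD920 / 4]);	/* 0x00785634 expected */
	return 0;
}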
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index adcfcb4c2429..3ca5efa4c097 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -221,6 +221,35 @@ enum rtw89_dig_gain_tia_idx {
RTW89_DIG_GAIN_TIA_IDX1 = 1
};
+enum rtw89_tssi_bandedge_cfg {
+ RTW89_TSSI_BANDEDGE_FLAT,
+ RTW89_TSSI_BANDEDGE_LOW,
+ RTW89_TSSI_BANDEDGE_MID,
+ RTW89_TSSI_BANDEDGE_HIGH,
+
+ RTW89_TSSI_CFG_NUM,
+};
+
+enum rtw89_tssi_sbw_idx {
+ RTW89_TSSI_SBW20,
+ RTW89_TSSI_SBW40_0,
+ RTW89_TSSI_SBW40_1,
+ RTW89_TSSI_SBW80_0,
+ RTW89_TSSI_SBW80_1,
+ RTW89_TSSI_SBW80_2,
+ RTW89_TSSI_SBW80_3,
+ RTW89_TSSI_SBW160_0,
+ RTW89_TSSI_SBW160_1,
+ RTW89_TSSI_SBW160_2,
+ RTW89_TSSI_SBW160_3,
+ RTW89_TSSI_SBW160_4,
+ RTW89_TSSI_SBW160_5,
+ RTW89_TSSI_SBW160_6,
+ RTW89_TSSI_SBW160_7,
+
+ RTW89_TSSI_SBW_NUM,
+};
+
struct rtw89_txpwr_byrate_cfg {
enum rtw89_band band;
enum rtw89_nss nss;
@@ -233,18 +262,22 @@ struct rtw89_txpwr_byrate_cfg {
#define DELTA_SWINGIDX_SIZE 30
struct rtw89_txpwr_track_cfg {
- const u8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE];
- const u8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE];
- const u8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE];
- const u8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE];
- const u8 *delta_swingidx_2gb_n;
- const u8 *delta_swingidx_2gb_p;
- const u8 *delta_swingidx_2ga_n;
- const u8 *delta_swingidx_2ga_p;
- const u8 *delta_swingidx_2g_cck_b_n;
- const u8 *delta_swingidx_2g_cck_b_p;
- const u8 *delta_swingidx_2g_cck_a_n;
- const u8 *delta_swingidx_2g_cck_a_p;
+ const s8 (*delta_swingidx_6gb_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_6gb_p)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_6ga_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_6ga_p)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE];
+ const s8 *delta_swingidx_2gb_n;
+ const s8 *delta_swingidx_2gb_p;
+ const s8 *delta_swingidx_2ga_n;
+ const s8 *delta_swingidx_2ga_p;
+ const s8 *delta_swingidx_2g_cck_b_n;
+ const s8 *delta_swingidx_2g_cck_b_p;
+ const s8 *delta_swingidx_2g_cck_a_n;
+ const s8 *delta_swingidx_2g_cck_a_p;
};
struct rtw89_phy_dig_gain_cfg {
@@ -259,6 +292,10 @@ struct rtw89_phy_dig_gain_table {
const struct rtw89_phy_dig_gain_cfg *cfg_tia_a;
};
+struct rtw89_phy_tssi_dbw_table {
+ u32 data[RTW89_TSSI_CFG_NUM][RTW89_TSSI_SBW_NUM];
+};
+
struct rtw89_phy_reg3_tbl {
const struct rtw89_reg3_def *reg3;
int size;
@@ -270,6 +307,15 @@ const struct rtw89_phy_reg3_tbl _name ## _tbl = { \
.size = ARRAY_SIZE(_name), \
}
+struct rtw89_nbi_reg_def {
+ struct rtw89_reg_def notch1_idx;
+ struct rtw89_reg_def notch1_frac_idx;
+ struct rtw89_reg_def notch1_en;
+ struct rtw89_reg_def notch2_idx;
+ struct rtw89_reg_def notch2_frac_idx;
+ struct rtw89_reg_def notch2_en;
+};
+
extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
@@ -442,5 +488,8 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_tssi_bandedge_cfg bandedge_cfg);
#endif
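
The txpwr tracking tables in rtw89_txpwr_track_cfg switch from u8 to s8 in this hunk (and gain 6 GHz rows), so each delta_swingidx entry can express a negative as well as a positive correction. A hedged sketch of how such a signed per-step delta would typically be folded into a bounded swing index follows; apply_swing_delta(), the clamp bound of 0..63 and the table values are illustrative assumptions, not the driver's exact tracking math.

#include <stdio.h>

#define DELTA_SWINGIDX_SIZE 30

/* Illustrative signed delta table: index is a thermal step, value is a
 * signed correction applied to the base swing index. */
static const signed char delta_swingidx[DELTA_SWINGIDX_SIZE] = {
	0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
	5, 5, 6, 6, 7, 7, 8, 8, 9, 9,
	10, 10, 11, 11, 12, 12, 13, 13, 14, 14,
};

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Apply the delta for a thermal step; 'negative' models picking the *_n
 * table direction by flipping the sign.  The 0..63 range is only an
 * example bound, not a value taken from the driver. */
static int apply_swing_delta(int base_idx, int thermal_step, int negative)
{
	int step = clamp_int(thermal_step, 0, DELTA_SWINGIDX_SIZE - 1);
	int delta = delta_swingidx[step];

	if (negative)
		delta = -delta;

	return clamp_int(base_idx + delta, 0, 63);
}

int main(void)
{
	printf("swing idx: %d\n", apply_swing_delta(32, 12, 1));	/* 32 - 6 = 26 */
	return 0;
}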
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 7eaa01e41ef2..a90b33720588 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -29,6 +29,36 @@ static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
return 0;
}
+static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev,
+ bool enter)
+{
+ ieee80211_stop_queues(rtwdev->hw);
+ rtwdev->hci.paused = true;
+ flush_work(&rtwdev->txq_work);
+ ieee80211_wake_queues(rtwdev->hw);
+
+ rtw89_hci_pause(rtwdev, true);
+ rtw89_mac_power_mode_change(rtwdev, enter);
+ rtw89_hci_switch_mode(rtwdev, enter);
+ rtw89_hci_pause(rtwdev, false);
+
+ rtwdev->hci.paused = false;
+
+ if (!enter) {
+ local_bh_disable();
+ napi_schedule(&rtwdev->napi);
+ local_bh_enable();
+ }
+}
+
+static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
+{
+ if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode))
+ rtw89_ps_power_mode_change_with_hci(rtwdev, enter);
+ else
+ rtw89_mac_power_mode_change(rtwdev, enter);
+}
+
static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
{
if (!rtwdev->ps_mode)
@@ -37,7 +67,7 @@ static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
if (test_and_set_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
return;
- rtw89_mac_power_mode_change(rtwdev, true);
+ rtw89_ps_power_mode_change(rtwdev, true);
}
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
@@ -46,7 +76,7 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
return;
if (test_and_clear_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
- rtw89_mac_power_mode_change(rtwdev, false);
+ rtw89_ps_power_mode_change(rtwdev, false);
}
static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
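
The new rtw89_ps_power_mode_change() above takes the heavier path, stopping the TX queues, pausing the HCI, switching the interface mode and kicking NAPI on exit, only when the chip advertises the current power-save mode in its low_power_hci_modes bitmap; otherwise it keeps the plain firmware power-mode command. A small sketch of that per-mode bitmap dispatch follows; the ps_mode values, struct chip_info and the two stub handlers are hypothetical, purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Hypothetical PS modes for the example; the driver has its own enum. */
enum ps_mode {
	PS_MODE_NONE,
	PS_MODE_LEGACY,
	PS_MODE_CLOCK_GATED,
	PS_MODE_POWER_OFF,
};

struct chip_info {
	uint32_t low_power_hci_modes;	/* bitmap of modes needing the HCI path */
};

static void power_mode_change_with_hci(int enter)
{
	printf("pause HCI, %s low-power state, switch HCI mode\n",
	       enter ? "enter" : "leave");
}

static void power_mode_change_plain(int enter)
{
	printf("%s low-power state via FW command only\n",
	       enter ? "enter" : "leave");
}

static void power_mode_change(const struct chip_info *chip,
			      enum ps_mode mode, int enter)
{
	if (chip->low_power_hci_modes & BIT(mode))
		power_mode_change_with_hci(enter);
	else
		power_mode_change_plain(enter);
}

int main(void)
{
	struct chip_info chip = {
		.low_power_hci_modes = BIT(PS_MODE_CLOCK_GATED) |
				       BIT(PS_MODE_POWER_OFF),
	};

	power_mode_change(&chip, PS_MODE_LEGACY, 1);	  /* plain path */
	power_mode_change(&chip, PS_MODE_CLOCK_GATED, 1); /* HCI path */
	return 0;
}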
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 25b106788118..6f5d1012c90c 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -87,6 +87,8 @@
#define B_AX_BTMODE_MASK GENMASK(7, 6)
#define MAC_AX_BT_MODE_0_3 0
#define MAC_AX_BT_MODE_2 2
+#define MAC_AX_RTK_MODE 0
+#define MAC_AX_CSR_MODE 1
#define B_AX_ENBT BIT(5)
#define B_AX_EROM_EN BIT(4)
#define B_AX_ENUARTRX BIT(2)
@@ -103,11 +105,24 @@
#define R_AX_SYS_SDIO_CTRL 0x0070
#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
+#define B_AX_PCIE_FORCE_PWR_NGAT BIT(13)
#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
#define B_AX_PCIE_AUXCLK_GATE BIT(11)
#define B_AX_LTE_MUX_CTRL_PATH BIT(26)
+#define R_AX_HCI_OPT_CTRL 0x0074
+#define BIT_WAKE_CTRL BIT(5)
+
+#define R_AX_HCI_BG_CTRL 0x0078
+#define B_AX_IBX_EN_VALUE BIT(15)
+#define B_AX_IB_EN_VALUE BIT(14)
+#define B_AX_FORCED_IB_EN BIT(4)
+#define B_AX_EN_REGBG BIT(3)
+#define B_AX_R_AX_BG_LPF BIT(2)
+#define B_AX_R_AX_BG GENMASK(1, 0)
+
#define R_AX_PLATFORM_ENABLE 0x0088
+#define B_AX_AXIDMA_EN BIT(3)
#define B_AX_WCPU_EN BIT(1)
#define B_AX_PLATFORM_EN BIT(0)
@@ -205,6 +220,7 @@
#define B_AX_EECS_PULL_LOW_EN BIT(16)
#define R_AX_WLRF_CTRL 0x02F0
+#define B_AX_AFC_AFEDIG BIT(17)
#define B_AX_WLRF1_CTRL_7 BIT(15)
#define B_AX_WLRF1_CTRL_1 BIT(9)
#define B_AX_WLRF_CTRL_7 BIT(7)
@@ -218,8 +234,60 @@
#define B_AX_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2)
#define B_AX_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0)
+#define R_AX_AFE_OFF_CTRL1 0x0444
+#define B_AX_S1_LDO_VSEL_F_MASK GENMASK(25, 24)
+#define B_AX_S1_LDO2PWRCUT_F BIT(23)
+#define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21)
+
#define R_AX_FILTER_MODEL_ADDR 0x0C04
+#define R_AX_HAXI_INIT_CFG1 0x1000
+#define B_AX_WD_ITVL_IDLE_V1_MASK GENMASK(31, 28)
+#define B_AX_WD_ITVL_ACT_V1_MASK GENMASK(27, 24)
+#define B_AX_DMA_MODE_MASK GENMASK(19, 18)
+#define DMA_MOD_PCIE_1B 0x0
+#define DMA_MOD_PCIE_4B 0x1
+#define DMA_MOD_USB 0x2
+#define DMA_MOD_SDIO 0x3
+#define B_AX_STOP_AXI_MST BIT(17)
+#define B_AX_HAXI_RST_KEEP_REG BIT(16)
+#define B_AX_RXHCI_EN_V1 BIT(15)
+#define B_AX_RXBD_MODE_V1 BIT(14)
+#define B_AX_HAXI_MAX_RXDMA_MASK GENMASK(9, 8)
+#define B_AX_TXHCI_EN_V1 BIT(7)
+#define B_AX_FLUSH_AXI_MST BIT(4)
+#define B_AX_RST_BDRAM BIT(3)
+#define B_AX_HAXI_MAX_TXDMA_MASK GENMASK(1, 0)
+
+#define R_AX_HAXI_DMA_STOP1 0x1010
+#define B_AX_STOP_WPDMA BIT(19)
+#define B_AX_STOP_CH12 BIT(18)
+#define B_AX_STOP_CH9 BIT(17)
+#define B_AX_STOP_CH8 BIT(16)
+#define B_AX_STOP_ACH7 BIT(15)
+#define B_AX_STOP_ACH6 BIT(14)
+#define B_AX_STOP_ACH5 BIT(13)
+#define B_AX_STOP_ACH4 BIT(12)
+#define B_AX_STOP_ACH3 BIT(11)
+#define B_AX_STOP_ACH2 BIT(10)
+#define B_AX_STOP_ACH1 BIT(9)
+#define B_AX_STOP_ACH0 BIT(8)
+
+#define R_AX_HAXI_DMA_BUSY1 0x101C
+#define B_AX_HAXIIO_BUSY BIT(20)
+#define B_AX_WPDMA_BUSY BIT(19)
+#define B_AX_CH12_BUSY BIT(18)
+#define B_AX_CH9_BUSY BIT(17)
+#define B_AX_CH8_BUSY BIT(16)
+#define B_AX_ACH7_BUSY BIT(15)
+#define B_AX_ACH6_BUSY BIT(14)
+#define B_AX_ACH5_BUSY BIT(13)
+#define B_AX_ACH4_BUSY BIT(12)
+#define B_AX_ACH3_BUSY BIT(11)
+#define B_AX_ACH2_BUSY BIT(10)
+#define B_AX_ACH1_BUSY BIT(9)
+#define B_AX_ACH0_BUSY BIT(8)
+
#define R_AX_PCIE_DBG_CTRL 0x11C0
#define B_AX_DBG_DUMMY_MASK GENMASK(23, 16)
#define B_AX_DBG_SEL_MASK GENMASK(15, 13)
@@ -228,6 +296,39 @@
#define B_AX_ASFF_FULL_NO_STK BIT(1)
#define B_AX_EN_STUCK_DBG BIT(0)
+#define R_AX_HAXI_DMA_STOP2 0x11C0
+#define B_AX_STOP_CH11 BIT(1)
+#define B_AX_STOP_CH10 BIT(0)
+
+#define R_AX_HAXI_DMA_BUSY2 0x11C8
+#define B_AX_CH11_BUSY BIT(1)
+#define B_AX_CH10_BUSY BIT(0)
+
+#define R_AX_HAXI_DMA_BUSY3 0x1208
+#define B_AX_RPQ_BUSY BIT(1)
+#define B_AX_RXQ_BUSY BIT(0)
+
+#define R_AX_LTR_DEC_CTRL 0x1600
+#define B_AX_LTR_IDX_DRV_VLD BIT(16)
+#define B_AX_LTR_CURR_IDX_DRV_MASK GENMASK(15, 14)
+#define B_AX_LTR_IDX_FW_VLD BIT(13)
+#define B_AX_LTR_CURR_IDX_FW_MASK GENMASK(12, 11)
+#define B_AX_LTR_IDX_HW_VLD BIT(10)
+#define B_AX_LTR_CURR_IDX_HW_MASK GENMASK(9, 8)
+#define B_AX_LTR_REQ_DRV BIT(7)
+#define B_AX_LTR_IDX_DRV_MASK GENMASK(6, 5)
+#define PCIE_LTR_IDX_IDLE 3
+#define B_AX_LTR_DRV_DEC_EN BIT(4)
+#define B_AX_LTR_FW_DEC_EN BIT(3)
+#define B_AX_LTR_HW_DEC_EN BIT(2)
+#define B_AX_LTR_SPACE_IDX_V1_MASK GENMASK(1, 0)
+#define LTR_EN_BITS (B_AX_LTR_HW_DEC_EN | B_AX_LTR_FW_DEC_EN | B_AX_LTR_DRV_DEC_EN)
+
+#define R_AX_LTR_LATENCY_IDX0 0x1604
+#define R_AX_LTR_LATENCY_IDX1 0x1608
+#define R_AX_LTR_LATENCY_IDX2 0x160C
+#define R_AX_LTR_LATENCY_IDX3 0x1610
+
#define R_AX_HCI_FC_CTRL_V1 0x1700
#define R_AX_CH_PAGE_CTRL_V1 0x1704
@@ -369,6 +470,7 @@
#define B_AX_APP_LTR_ACT BIT(5)
#define B_AX_APP_LTR_IDLE BIT(4)
#define B_AX_LTR_EN BIT(1)
+#define B_AX_LTR_WD_NOEMP_CHK_V1 BIT(1)
#define B_AX_LTR_HW_EN BIT(0)
#define R_AX_LTR_CTRL_1 0x8414
@@ -404,6 +506,21 @@
#define B_AX_WDE_EMPTY_QUE_CMAC0_MBH BIT(1)
#define B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC BIT(0)
+#define R_AX_DMAC_ERR_IMR 0x8520
+#define B_AX_DLE_CPUIO_ERR_INT_EN BIT(10)
+#define B_AX_APB_BRIDGE_ERR_INT_EN BIT(9)
+#define B_AX_DISPATCH_ERR_INT_EN BIT(8)
+#define B_AX_PKTIN_ERR_INT_EN BIT(7)
+#define B_AX_PLE_DLE_ERR_INT_EN BIT(6)
+#define B_AX_TXPKTCTRL_ERR_INT_EN BIT(5)
+#define B_AX_WDE_DLE_ERR_INT_EN BIT(4)
+#define B_AX_STA_SCHEDULER_ERR_INT_EN BIT(3)
+#define B_AX_MPDU_ERR_INT_EN BIT(2)
+#define B_AX_WSEC_ERR_INT_EN BIT(1)
+#define B_AX_WDRLS_ERR_INT_EN BIT(0)
+#define DMAC_ERR_IMR_EN GENMASK(31, 0)
+#define DMAC_ERR_IMR_DIS 0
+
#define R_AX_DMAC_ERR_ISR 0x8524
#define B_AX_DLE_CPUIO_ERR_FLAG BIT(10)
#define B_AX_APB_BRIDGE_ERR_FLAG BIT(9)
@@ -427,13 +544,361 @@
#define B_AX_HOST_ADDR_INFO_8B_SEL BIT(0)
#define R_AX_HOST_DISPATCHER_ERR_IMR 0x8850
+#define B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN BIT(31)
+#define B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN BIT(30)
+#define B_AX_HDT_CHKSUM_FSM_ERR_INT_EN BIT(29)
+#define B_AX_HDT_SHIFT_DMA_CFG_ERR_INT_EN BIT(28)
+#define B_AX_HDT_DMA_PROCESS_ERR_INT_EN BIT(27)
+#define B_AX_HDT_TOTAL_LEN_ERR_INT_EN BIT(26)
+#define B_AX_HDT_SHIFT_EN_ERR_INT_EN BIT(25)
+#define B_AX_HDT_RXAGG_CFG_ERR_INT_EN BIT(24)
+#define B_AX_HDT_OUTPUT_ERR_INT_EN BIT(21)
+#define B_AX_HDT_RES_ERR_INT_EN BIT(20)
+#define B_AX_HDT_BURST_NUM_ERR_INT_EN BIT(19)
+#define B_AX_HDT_NULLPKT_ERR_INT_EN BIT(18)
+#define B_AX_HDT_FLOW_CTRL_ERR_INT_EN BIT(17)
+#define B_AX_HDT_PLD_CMD_UNDERFLOW_INT_EN BIT(16)
+#define B_AX_HDT_PLD_CMD_OVERLOW_INT_EN BIT(15)
+#define B_AX_HDT_TX_WRITE_UNDERFLOW_INT_EN BIT(14)
+#define B_AX_HDT_TX_WRITE_OVERFLOW_INT_EN BIT(13)
+#define B_AX_HDT_TCP_CHK_ERR_INT_EN BIT(12)
+#define B_AX_HDT_TXPKTSIZE_ERR_INT_EN BIT(11)
+#define B_AX_HDT_PRE_COST_ERR_INT_EN BIT(10)
+#define B_AX_HDT_WD_CHK_ERR_INT_EN BIT(9)
+#define B_AX_HDT_CHANNEL_DMA_ERR_INT_EN BIT(8)
#define B_AX_HDT_OFFSET_UNMATCH_INT_EN BIT(7)
+#define B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN BIT(6)
+#define B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN BIT(5)
+#define B_AX_HDT_PERMU_UNDERFLOW_INT_EN BIT(4)
+#define B_AX_HDT_PERMU_OVERFLOW_INT_EN BIT(3)
#define B_AX_HDT_PKT_FAIL_DBG_INT_EN BIT(2)
+#define B_AX_HDT_CHANNEL_ID_ERR_INT_EN BIT(1)
+#define B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_HOST_DISP_IMR_CLR (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_HDT_CHANNEL_ID_ERR_INT_EN | \
+ B_AX_HDT_PKT_FAIL_DBG_INT_EN | \
+ B_AX_HDT_PERMU_OVERFLOW_INT_EN | \
+ B_AX_HDT_PERMU_UNDERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_OFFSET_UNMATCH_INT_EN | \
+ B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HDT_WD_CHK_ERR_INT_EN | \
+ B_AX_HDT_PRE_COST_ERR_INT_EN | \
+ B_AX_HDT_TXPKTSIZE_ERR_INT_EN | \
+ B_AX_HDT_TCP_CHK_ERR_INT_EN | \
+ B_AX_HDT_TX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_HDT_TX_WRITE_UNDERFLOW_INT_EN | \
+ B_AX_HDT_PLD_CMD_OVERLOW_INT_EN | \
+ B_AX_HDT_PLD_CMD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_HDT_NULLPKT_ERR_INT_EN | \
+ B_AX_HDT_BURST_NUM_ERR_INT_EN | \
+ B_AX_HDT_RXAGG_CFG_ERR_INT_EN | \
+ B_AX_HDT_SHIFT_EN_ERR_INT_EN | \
+ B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_HDT_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_HDT_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_HDT_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN)
+#define B_AX_HOST_DISP_IMR_SET (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_HDT_DMA_PROCESS_ERR_INT_EN)
+
+#define B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31)
+#define B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN BIT(30)
+#define B_AX_HR_CHKSUM_FSM_ERR_INT_EN BIT(29)
+#define B_AX_HR_SHIFT_DMA_CFG_ERR_INT_EN BIT(28)
+#define B_AX_HR_DMA_PROCESS_ERR_INT_EN BIT(27)
+#define B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(26)
+#define B_AX_HR_SHIFT_EN_ERR_INT_EN BIT(25)
+#define B_AX_HR_AGG_CFG_ERR_INT_EN BIT(24)
+#define B_AX_HR_DMA_RD_CNT_DEQ_ERR_INT_EN BIT(23)
+#define B_AX_HR_PLD_LEN_ZERO_ERR_INT_EN BIT(22)
+#define B_AX_HT_ILL_CH_ERR_INT_EN BIT(20)
+#define B_AX_HT_ADDR_INFO_LEN_ERR_INT_EN BIT(18)
+#define B_AX_HT_WD_LEN_OVER_ERR_INT_EN BIT(17)
+#define B_AX_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(16)
+#define B_AX_HT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(15)
+#define B_AX_HT_WRFF_UNDERFLOW_ERR_INT_EN BIT(14)
+#define B_AX_HT_WRFF_OVERFLOW_ERR_INT_EN BIT(13)
+#define B_AX_HT_CHKSUM_FSM_ERR_INT_EN BIT(12)
+#define B_AX_HT_TXPKTSIZE_ERR_INT_EN BIT(11)
+#define B_AX_HT_PRE_SUB_ERR_INT_EN BIT(10)
+#define B_AX_HT_WD_CHKSUM_ERR_INT_EN BIT(9)
+#define B_AX_HT_CHANNEL_DMA_ERR_INT_EN BIT(8)
+#define B_AX_HT_OFFSET_UNMATCH_ERR_INT_EN BIT(7)
+#define B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN BIT(6)
+#define B_AX_HT_PAYLOAD_OVER_ERR_INT_EN BIT(5)
+#define B_AX_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4)
+#define B_AX_HT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3)
+#define B_AX_HT_PKT_FAIL_ERR_INT_EN BIT(2)
+#define B_AX_HT_CH_ID_ERR_INT_EN BIT(1)
+#define B_AX_HT_EP_CH_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_HOST_DISP_IMR_CLR_V1 (B_AX_HT_EP_CH_DIFF_ERR_INT_EN | \
+ B_AX_HT_CH_ID_ERR_INT_EN | \
+ B_AX_HT_PKT_FAIL_ERR_INT_EN | \
+ B_AX_HT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_AX_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_HT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_HT_OFFSET_UNMATCH_ERR_INT_EN | \
+ B_AX_HT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HT_WD_CHKSUM_ERR_INT_EN | \
+ B_AX_HT_PRE_SUB_ERR_INT_EN | \
+ B_AX_HT_TXPKTSIZE_ERR_INT_EN | \
+ B_AX_HT_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_HT_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_HT_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_HT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_AX_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_AX_HT_WD_LEN_OVER_ERR_INT_EN | \
+ B_AX_HT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_AX_HT_ILL_CH_ERR_INT_EN | \
+ B_AX_HR_PLD_LEN_ZERO_ERR_INT_EN | \
+ B_AX_HR_DMA_RD_CNT_DEQ_ERR_INT_EN | \
+ B_AX_HR_AGG_CFG_ERR_INT_EN | \
+ B_AX_HR_SHIFT_EN_ERR_INT_EN | \
+ B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_HR_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_HR_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_HR_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define B_AX_HOST_DISP_IMR_SET_V1 (B_AX_HT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_HT_ILL_CH_ERR_INT_EN | \
+ B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_HR_DMA_PROCESS_ERR_INT_EN)
#define R_AX_CPU_DISPATCHER_ERR_IMR 0x8854
+#define B_AX_CPU_RX_WRITE_UNDERFLOW_INT_EN BIT(31)
+#define B_AX_CPU_RX_WRITE_OVERFLOW_INT_EN BIT(30)
+#define B_AX_CPU_CHKSUM_FSM_ERR_INT_EN BIT(29)
+#define B_AX_CPU_SHIFT_DMA_CFG_ERR_INT_EN BIT(28)
+#define B_AX_CPU_DMA_PROCESS_ERR_INT_EN BIT(27)
+#define B_AX_CPU_TOTAL_LEN_ERR_INT_EN BIT(26)
#define B_AX_CPU_SHIFT_EN_ERR_INT_EN BIT(25)
+#define B_AX_CPU_RXAGG_CFG_ERR_INT_EN BIT(24)
+#define B_AX_CPU_OUTPUT_ERR_INT_EN BIT(20)
+#define B_AX_CPU_RESP_ERR_INT_EN BIT(19)
+#define B_AX_CPU_BURST_NUM_ERR_INT_EN BIT(18)
+#define B_AX_CPU_NULLPKT_ERR_INT_EN BIT(17)
+#define B_AX_CPU_FLOW_CTRL_ERR_INT_EN BIT(16)
+#define B_AX_CPU_F2P_SEQ_ERR_INT_EN BIT(15)
+#define B_AX_CPU_F2P_QSEL_ERR_INT_EN BIT(14)
+#define B_AX_CPU_PLD_CMD_UNDERFLOW_INT_EN BIT(13)
+#define B_AX_CPU_PLD_CMD_OVERLOW_INT_EN BIT(12)
+#define B_AX_CPU_PRE_COST_ERR_INT_EN BIT(11)
+#define B_AX_CPU_WD_CHK_ERR_INT_EN BIT(10)
+#define B_AX_CPU_CHANNEL_DMA_ERR_INT_EN BIT(9)
+#define B_AX_CPU_OFFSET_UNMATCH_INT_EN BIT(8)
+#define B_AX_CPU_PAYLOAD_CHKSUM_ERR_INT_EN BIT(7)
+#define B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN BIT(6)
+#define B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN BIT(5)
+#define B_AX_CPU_PERMU_UNDERFLOW_INT_EN BIT(4)
+#define B_AX_CPU_PERMU_OVERFLOW_INT_EN BIT(3)
+#define B_AX_CPU_CHANNEL_ID_ERR_INT_EN BIT(2)
+#define B_AX_CPU_PKT_FAIL_DBG_INT_EN BIT(1)
+#define B_AX_CPU_CHANNEL_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_CPU_DISP_IMR_CLR (B_AX_CPU_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_CPU_PKT_FAIL_DBG_INT_EN | \
+ B_AX_CPU_CHANNEL_ID_ERR_INT_EN | \
+ B_AX_CPU_PERMU_OVERFLOW_INT_EN | \
+ B_AX_CPU_PERMU_UNDERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_CHKSUM_ERR_INT_EN | \
+ B_AX_CPU_OFFSET_UNMATCH_INT_EN | \
+ B_AX_CPU_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_CPU_WD_CHK_ERR_INT_EN | \
+ B_AX_CPU_PRE_COST_ERR_INT_EN | \
+ B_AX_CPU_PLD_CMD_OVERLOW_INT_EN | \
+ B_AX_CPU_PLD_CMD_UNDERFLOW_INT_EN | \
+ B_AX_CPU_F2P_QSEL_ERR_INT_EN | \
+ B_AX_CPU_F2P_SEQ_ERR_INT_EN | \
+ B_AX_CPU_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_CPU_NULLPKT_ERR_INT_EN | \
+ B_AX_CPU_BURST_NUM_ERR_INT_EN | \
+ B_AX_CPU_RXAGG_CFG_ERR_INT_EN | \
+ B_AX_CPU_SHIFT_EN_ERR_INT_EN | \
+ B_AX_CPU_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_CPU_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_CPU_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_CPU_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_CPU_RX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_CPU_RX_WRITE_UNDERFLOW_INT_EN)
+#define B_AX_CPU_DISP_IMR_SET (B_AX_CPU_PKT_FAIL_DBG_INT_EN | \
+ B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_CPU_TOTAL_LEN_ERR_INT_EN)
+
+#define B_AX_CR_PLD_LEN_ERR_INT_EN BIT(30)
+#define B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN BIT(29)
+#define B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN BIT(28)
+#define B_AX_CR_SHIFT_DMA_CFG_ERR_INT_EN BIT(27)
+#define B_AX_CR_DMA_PROCESS_ERR_INT_EN BIT(26)
+#define B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(25)
+#define B_AX_CR_SHIFT_EN_ERR_INT_EN BIT(24)
+#define B_AX_REUSE_FIFO_B_UNDER_ERR_INT_EN BIT(22)
+#define B_AX_REUSE_FIFO_B_OVER_ERR_INT_EN BIT(21)
+#define B_AX_REUSE_FIFO_A_UNDER_ERR_INT_EN BIT(20)
+#define B_AX_REUSE_FIFO_A_OVER_ERR_INT_EN BIT(19)
+#define B_AX_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN BIT(17)
+#define B_AX_CT_WD_LEN_OVER_ERR_INT_EN BIT(16)
+#define B_AX_CT_F2P_SEQ_ERR_INT_EN BIT(15)
+#define B_AX_CT_F2P_QSEL_ERR_INT_EN BIT(14)
+#define B_AX_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(13)
+#define B_AX_CT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(12)
+#define B_AX_CT_PRE_SUB_ERR_INT_EN BIT(11)
+#define B_AX_CT_WD_CHKSUM_ERR_INT_EN BIT(10)
+#define B_AX_CT_CHANNEL_DMA_ERR_INT_EN BIT(9)
+#define B_AX_CT_OFFSET_UNMATCH_ERR_INT_EN BIT(8)
+#define B_AX_CT_PAYLOAD_CHKSUM_ERR_INT_EN BIT(7)
+#define B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN BIT(6)
+#define B_AX_CT_PAYLOAD_OVER_ERR_INT_EN BIT(5)
+#define B_AX_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4)
+#define B_AX_CT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3)
+#define B_AX_CT_CH_ID_ERR_INT_EN BIT(2)
+#define B_AX_CT_EP_CH_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_CPU_DISP_IMR_CLR_V1 (B_AX_CT_EP_CH_DIFF_ERR_INT_EN | \
+ B_AX_CT_CH_ID_ERR_INT_EN | \
+ B_AX_CT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_AX_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_CHKSUM_ERR_INT_EN | \
+ B_AX_CT_OFFSET_UNMATCH_ERR_INT_EN | \
+ B_AX_CT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_CT_WD_CHKSUM_ERR_INT_EN | \
+ B_AX_CT_PRE_SUB_ERR_INT_EN | \
+ B_AX_CT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_AX_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_AX_CT_F2P_QSEL_ERR_INT_EN | \
+ B_AX_CT_F2P_SEQ_ERR_INT_EN | \
+ B_AX_CT_WD_LEN_OVER_ERR_INT_EN | \
+ B_AX_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_A_OVER_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_A_UNDER_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_B_OVER_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_B_UNDER_ERR_INT_EN | \
+ B_AX_CR_SHIFT_EN_ERR_INT_EN | \
+ B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_CR_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_CR_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_CR_PLD_LEN_ERR_INT_EN)
+#define B_AX_CPU_DISP_IMR_SET_V1 (B_AX_CT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_CR_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN)
#define R_AX_OTHER_DISPATCHER_ERR_IMR 0x8858
+#define B_AX_OTHER_STF_WROQT_UNDERFLOW_INT_EN BIT(29)
+#define B_AX_OTHER_STF_WROQT_OVERFLOW_INT_EN BIT(28)
+#define B_AX_OTHER_STF_WRFF_UNDERFLOW_INT_EN BIT(27)
+#define B_AX_OTHER_STF_WRFF_OVERFLOW_INT_EN BIT(26)
+#define B_AX_OTHER_STF_CMD_UNDERFLOW_INT_EN BIT(25)
+#define B_AX_OTHER_STF_CMD_OVERFLOW_INT_EN BIT(24)
+#define B_AX_HOST_ADDR_INFO_LEN_ZERO_ERR_INT_EN BIT(17)
+#define B_AX_CPU_ADDR_INFO_LEN_ZERO_ERR_INT_EN BIT(16)
+#define B_AX_PLE_OUTPUT_ERR_INT_EN BIT(12)
+#define B_AX_PLE_RESP_ERR_INT_EN BIT(11)
+#define B_AX_PLE_BURST_NUM_ERR_INT_EN BIT(10)
+#define B_AX_PLE_NULL_PKT_ERR_INT_EN BIT(9)
+#define B_AX_PLE_FLOW_CTRL_ERR_INT_EN BIT(8)
+#define B_AX_WDE_OUTPUT_ERR_INT_EN BIT(4)
+#define B_AX_WDE_RESP_ERR_INT_EN BIT(3)
+#define B_AX_WDE_BURST_NUM_ERR_INT_EN BIT(2)
+#define B_AX_WDE_NULL_PKT_ERR_INT_EN BIT(1)
+#define B_AX_WDE_FLOW_CTRL_ERR_INT_EN BIT(0)
+#define B_AX_OTHER_DISP_IMR_CLR (B_AX_OTHER_STF_WROQT_UNDERFLOW_INT_EN | \
+ B_AX_OTHER_STF_WROQT_OVERFLOW_INT_EN | \
+ B_AX_OTHER_STF_WRFF_UNDERFLOW_INT_EN | \
+ B_AX_OTHER_STF_WRFF_OVERFLOW_INT_EN | \
+ B_AX_OTHER_STF_CMD_UNDERFLOW_INT_EN | \
+ B_AX_OTHER_STF_CMD_OVERFLOW_INT_EN | \
+ B_AX_HOST_ADDR_INFO_LEN_ZERO_ERR_INT_EN | \
+ B_AX_CPU_ADDR_INFO_LEN_ZERO_ERR_INT_EN | \
+ B_AX_PLE_OUTPUT_ERR_INT_EN | \
+ B_AX_PLE_RESP_ERR_INT_EN | \
+ B_AX_PLE_BURST_NUM_ERR_INT_EN | \
+ B_AX_PLE_NULL_PKT_ERR_INT_EN | \
+ B_AX_PLE_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_WDE_OUTPUT_ERR_INT_EN | \
+ B_AX_WDE_RESP_ERR_INT_EN | \
+ B_AX_WDE_BURST_NUM_ERR_INT_EN | \
+ B_AX_WDE_NULL_PKT_ERR_INT_EN | \
+ B_AX_WDE_FLOW_CTRL_ERR_INT_EN)
+
+#define B_AX_REUSE_SIZE_ERR_INT_EN BIT(31)
+#define B_AX_REUSE_EN_ERR_INT_EN BIT(30)
+#define B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN BIT(29)
+#define B_AX_STF_OQT_OVERFLOW_ERR_INT_EN BIT(28)
+#define B_AX_STF_WRFF_UNDERFLOW_ERR_INT_EN BIT(27)
+#define B_AX_STF_WRFF_OVERFLOW_ERR_INT_EN BIT(26)
+#define B_AX_STF_CMD_UNDERFLOW_ERR_INT_EN BIT(25)
+#define B_AX_STF_CMD_OVERFLOW_ERR_INT_EN BIT(24)
+#define B_AX_REUSE_SIZE_ZERO_ERR_INT_EN BIT(23)
+#define B_AX_REUSE_PKT_CNT_ERR_INT_EN BIT(22)
+#define B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN BIT(21)
+#define B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN BIT(20)
+#define B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN BIT(19)
+#define B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN BIT(18)
+#define B_AX_CDT_ADDR_INFO_LEN_ERR_INT_EN BIT(17)
+#define B_AX_HDT_ADDR_INFO_LEN_ERR_INT_EN BIT(16)
+#define B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN BIT(15)
+#define B_AX_CDR_RX_TIMEOUT_ERR_INT_EN BIT(14)
+#define B_AX_PLE_RESPOSE_ERR_INT_EN BIT(11)
+#define B_AX_HDR_DMA_TIMEOUT_ERR_INT_EN BIT(7)
+#define B_AX_HDR_RX_TIMEOUT_ERR_INT_EN BIT(6)
+#define B_AX_WDE_RESPONSE_ERR_INT_EN BIT(3)
+#define B_AX_OTHER_DISP_IMR_CLR_V1 (B_AX_CT_EP_CH_DIFF_ERR_INT_EN | \
+ B_AX_WDE_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_WDE_NULL_PKT_ERR_INT_EN | \
+ B_AX_WDE_BURST_NUM_ERR_INT_EN | \
+ B_AX_WDE_RESPONSE_ERR_INT_EN | \
+ B_AX_WDE_OUTPUT_ERR_INT_EN | \
+ B_AX_HDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_AX_PLE_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_PLE_NULL_PKT_ERR_INT_EN | \
+ B_AX_PLE_BURST_NUM_ERR_INT_EN | \
+ B_AX_PLE_RESPOSE_ERR_INT_EN | \
+ B_AX_PLE_OUTPUT_ERR_INT_EN | \
+ B_AX_CDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_AX_CDT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_REUSE_PKT_CNT_ERR_INT_EN | \
+ B_AX_REUSE_SIZE_ZERO_ERR_INT_EN | \
+ B_AX_STF_CMD_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_AX_STF_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_STF_OQT_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN | \
+ B_AX_REUSE_EN_ERR_INT_EN | \
+ B_AX_REUSE_SIZE_ERR_INT_EN)
+#define B_AX_OTHER_DISP_IMR_SET_V1 (B_AX_CDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_STF_OQT_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN)
#define R_AX_HCI_FC_CTRL 0x8A00
#define B_AX_HCI_FC_CH12_FULL_COND_MASK GENMASK(11, 10)
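
The interrupt-mask definitions added above come in CLR/SET pairs (for example B_AX_HOST_DISP_IMR_CLR and B_AX_HOST_DISP_IMR_SET), which suggests the usual two-step programming: clear the whole group of enable bits, then raise only the sources the driver wants, leaving unrelated bits untouched. The sketch below shows that read-modify-write sequence on a stand-in register; how the masks are consumed is an assumption here (presumably via the driver's masked set/clear helpers), and write32_clr()/write32_set() plus the DEMO_* macros are local illustrations only.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Three example enable bits and a CLR/SET pair in the style of the header:
 * the CLR mask covers the whole group, the SET mask the wanted subset. */
#define ERR_A_INT_EN	BIT(0)
#define ERR_B_INT_EN	BIT(1)
#define ERR_C_INT_EN	BIT(2)
#define DEMO_IMR_CLR	(ERR_A_INT_EN | ERR_B_INT_EN | ERR_C_INT_EN)
#define DEMO_IMR_SET	(ERR_A_INT_EN | ERR_C_INT_EN)

static uint32_t imr_reg = 0xdeadbeef;	/* stand-in hardware register */

static void write32_clr(uint32_t *reg, uint32_t bits)
{
	*reg &= ~bits;
}

static void write32_set(uint32_t *reg, uint32_t bits)
{
	*reg |= bits;
}

int main(void)
{
	/* clear-then-set: bits outside the group are preserved, the group is
	 * reset, and only the wanted interrupt sources end up enabled */
	write32_clr(&imr_reg, DEMO_IMR_CLR);
	write32_set(&imr_reg, DEMO_IMR_SET);
	printf("IMR = 0x%08x\n", imr_reg);	/* low three bits are now 0b101 */
	return 0;
}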
@@ -512,9 +977,168 @@
#define B_AX_WDE_START_BOUND_MASK GENMASK(13, 8)
#define B_AX_WDE_PAGE_SEL_MASK GENMASK(1, 0)
#define B_AX_WDE_FREE_PAGE_NUM_MASK GENMASK(28, 16)
+
+#define R_AX_WDE_ERRFLAG_MSG 0x8C30
+#define B_AX_WDE_ERR_FLAG_MSG_MASK GENMASK(31, 0)
+
#define R_AX_WDE_ERR_FLAG_CFG 0x8C34
+
#define R_AX_WDE_ERR_IMR 0x8C38
+#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_AX_WDE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN BIT(19)
+#define B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN BIT(18)
+#define B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN BIT(17)
+#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16)
+#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15)
+#define B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN BIT(14)
+#define B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN BIT(13)
+#define B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN BIT(12)
+#define B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN BIT(7)
+#define B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN BIT(6)
+#define B_AX_WDE_GETNPG_STRPG_ERR_INT_EN BIT(5)
+#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(4)
+#define B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN BIT(3)
+#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(2)
+#define B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN BIT(1)
+#define B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN BIT(0)
+#define B_AX_WDE_IMR_CLR (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+
+#define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
+#define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
+#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_AX_WDE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN BIT(19)
+#define B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN BIT(18)
+#define B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN BIT(17)
+#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16)
+#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15)
+#define B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN BIT(14)
+#define B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 BIT(9)
+#define B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 BIT(8)
+#define B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 BIT(7)
+#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 BIT(6)
+#define B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 BIT(5)
+#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 BIT(4)
+#define B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 BIT(3)
+#define B_AX_WDE_BUFREQ_SIZELMT_INT_EN BIT(2)
+#define B_AX_WDE_BUFREQ_SIZE0_INT_EN BIT(1)
+#define B_AX_WDE_IMR_CLR_V1 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+#define B_AX_WDE_IMR_SET_V1 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+
#define R_AX_WDE_ERR_ISR 0x8C3C
+#define B_AX_WDE_DATCHN_RRDY_ERR BIT(27)
+#define B_AX_WDE_DATCHN_FRZTO_ERR BIT(26)
+#define B_AX_WDE_DATCHN_NULLPG_ERR BIT(25)
+#define B_AX_WDE_DATCHN_ARBT_ERR BIT(24)
+#define B_AX_WDE_QUEMGN_FRZTO_ERR BIT(19)
+#define B_AX_WDE_NXTPKTLL_AD_ERR BIT(18)
+#define B_AX_WDE_PREPKTLLT_AD_ERR BIT(17)
+#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR BIT(16)
+#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR BIT(15)
+#define B_AX_WDE_QUE_SRCQUEID_ERR BIT(14)
+#define B_AX_WDE_QUE_DSTQUEID_ERR BIT(13)
+#define B_AX_WDE_QUE_CMDTYPE_ERR BIT(12)
+#define B_AX_WDE_BUFMGN_FRZTO_ERR BIT(7)
+#define B_AX_WDE_GETNPG_PGOFST_ERR BIT(6)
+#define B_AX_WDE_GETNPG_STRPG_ERR BIT(5)
+#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR BIT(4)
+#define B_AX_WDE_BUFRTN_SIZE_ERR BIT(3)
+#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR BIT(2)
+#define B_AX_WDE_BUFREQ_UNAVAL_ERR BIT(1)
+#define B_AX_WDE_BUFREQ_QTAID_ERR BIT(0)
#define B_AX_WDE_MAX_SIZE_MASK GENMASK(27, 16)
#define B_AX_WDE_MIN_SIZE_MASK GENMASK(11, 0)
@@ -549,7 +1173,123 @@
#define R_AX_PLE_ERR_FLAG_CFG 0x9034
#define R_AX_PLE_ERR_IMR 0x9038
+#define B_AX_PLE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_AX_PLE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN BIT(19)
+#define B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN BIT(18)
+#define B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN BIT(17)
+#define B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16)
+#define B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15)
+#define B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN BIT(14)
+#define B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN BIT(13)
+#define B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN BIT(12)
+#define B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN BIT(7)
+#define B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN BIT(6)
#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN BIT(5)
+#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(4)
+#define B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN BIT(3)
+#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(2)
+#define B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN BIT(1)
+#define B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN BIT(0)
+#define B_AX_PLE_IMR_CLR (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_PLE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_PLE_IMR_SET (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN)
+
+#define B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
+#define B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
+#define B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 BIT(9)
+#define B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 BIT(8)
+#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 BIT(7)
+#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 BIT(6)
+#define B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 BIT(5)
+#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 BIT(4)
+#define B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 BIT(3)
+#define B_AX_PLE_BUFREQ_SIZELMT_INT_EN BIT(2)
+#define B_AX_PLE_BUFREQ_SIZE0_INT_EN BIT(1)
+#define B_AX_PLE_IMR_CLR_V1 (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN)
+#define B_AX_PLE_IMR_SET_V1 (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN)
#define R_AX_PLE_ERR_FLAG_ISR 0x903C
#define B_AX_PLE_MAX_SIZE_MASK GENMASK(27, 16)
@@ -604,12 +1344,97 @@
#define B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN BIT(2)
#define B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN BIT(1)
#define B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN BIT(0)
+#define B_AX_WDRLS_IMR_EN_CLR (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+#define B_AX_WDRLS_IMR_SET (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+#define B_AX_WDRLS_IMR_SET_V1 (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+
#define R_AX_WDRLS_ERR_ISR 0x9434
+#define R_AX_BBRPT_COM_ERR_IMR 0x9608
+#define B_AX_BBRPT_COM_HANG_EN BIT(1)
+#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN BIT(0)
+
#define R_AX_BBRPT_COM_ERR_IMR_ISR 0x960C
+#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR BIT(16)
+#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN BIT(0)
+
+#define R_AX_BBRPT_CHINFO_ERR_IMR 0x9628
+#define B_AX_BBPRT_CHIF_TO_ERR_INT_EN BIT(7)
+#define B_AX_BBPRT_CHIF_NULL_ERR_INT_EN BIT(6)
+#define B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN BIT(5)
+#define B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN BIT(4)
+#define B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN BIT(3)
+#define B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN BIT(2)
+#define B_AX_BBPRT_CHIF_OVF_ERR_INT_EN BIT(1)
+#define B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN BIT(0)
+#define R_AX_BBRPT_CHINFO_IMR_SET_V1 (B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_OVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_NULL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_TO_ERR_INT_EN)
+
#define R_AX_BBRPT_CHINFO_ERR_IMR_ISR 0x962C
+#define B_AX_BBPRT_CHIF_TO_ERR BIT(23)
+#define B_AX_BBPRT_CHIF_NULL_ERR BIT(22)
+#define B_AX_BBPRT_CHIF_LEFT2_ERR BIT(21)
+#define B_AX_BBPRT_CHIF_LEFT1_ERR BIT(20)
+#define B_AX_BBPRT_CHIF_HDRL_ERR BIT(19)
+#define B_AX_BBPRT_CHIF_BOVF_ERR BIT(18)
+#define B_AX_BBPRT_CHIF_OVF_ERR BIT(17)
+#define B_AX_BBPRT_CHIF_BB_TO_ERR BIT(16)
+#define B_AX_BBPRT_CHIF_TO_ERR_INT_EN BIT(7)
+#define B_AX_BBPRT_CHIF_NULL_ERR_INT_EN BIT(6)
+#define B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN BIT(5)
+#define B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN BIT(4)
+#define B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN BIT(3)
+#define B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN BIT(2)
+#define B_AX_BBPRT_CHIF_OVF_ERR_INT_EN BIT(1)
+#define B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN BIT(0)
+#define B_AX_BBRPT_CHINFO_IMR_CLR (B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_OVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_NULL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_TO_ERR_INT_EN)
+
+#define R_AX_BBRPT_DFS_ERR_IMR 0x9638
+#define B_AX_BBRPT_DFS_TO_ERR_INT_EN BIT(0)
+
#define R_AX_BBRPT_DFS_ERR_IMR_ISR 0x963C
+#define B_AX_BBRPT_DFS_TO_ERR BIT(16)
+#define B_AX_BBRPT_DFS_TO_ERR_INT_EN BIT(0)
+
#define R_AX_LA_ERRFLAG 0x966C
+#define B_AX_LA_ISR_DATA_LOSS_ERR BIT(16)
+#define B_AX_LA_IMR_DATA_LOSS_ERR BIT(0)
#define R_AX_WD_BUF_REQ 0x9800
#define R_AX_PL_BUF_REQ 0x9820
@@ -645,18 +1470,51 @@
#define R_AX_PL_CPUQ_OP_STATUS 0x983C
#define B_AX_WD_CPUQ_OP_STAT_DONE BIT(31)
#define B_AX_WD_CPUQ_OP_PKTID_MASK GENMASK(11, 0)
+
#define R_AX_CPUIO_ERR_IMR 0x9840
+#define B_AX_PLEQUE_OP_ERR_INT_EN BIT(12)
+#define B_AX_PLEBUF_OP_ERR_INT_EN BIT(8)
+#define B_AX_WDEQUE_OP_ERR_INT_EN BIT(4)
+#define B_AX_WDEBUF_OP_ERR_INT_EN BIT(0)
+#define B_AX_CPUIO_IMR_CLR (B_AX_WDEBUF_OP_ERR_INT_EN | \
+ B_AX_WDEQUE_OP_ERR_INT_EN | \
+ B_AX_PLEBUF_OP_ERR_INT_EN | \
+ B_AX_PLEQUE_OP_ERR_INT_EN)
+#define B_AX_CPUIO_IMR_SET (B_AX_WDEBUF_OP_ERR_INT_EN | \
+ B_AX_WDEQUE_OP_ERR_INT_EN | \
+ B_AX_PLEBUF_OP_ERR_INT_EN | \
+ B_AX_PLEQUE_OP_ERR_INT_EN)
+
#define R_AX_CPUIO_ERR_ISR 0x9844
#define R_AX_SEC_ERR_IMR_ISR 0x991C
#define R_AX_PKTIN_SETTING 0x9A00
#define B_AX_WD_ADDR_INFO_LENGTH BIT(1)
+
#define R_AX_PKTIN_ERR_IMR 0x9A20
+#define B_AX_PKTIN_GETPKTID_ERR_INT_EN BIT(0)
+
#define R_AX_PKTIN_ERR_ISR 0x9A24
#define R_AX_MPDU_TX_ERR_ISR 0x9BF0
#define R_AX_MPDU_TX_ERR_IMR 0x9BF4
+#define B_AX_TX_KSRCH_ERR_EN BIT(9)
+#define B_AX_TX_NW_TYPE_ERR_EN BIT(8)
+#define B_AX_TX_LLC_PRE_ERR_EN BIT(7)
+#define B_AX_TX_ETH_TYPE_ERR_EN BIT(6)
+#define B_AX_TX_HDR3_SIZE_ERR_INT_EN BIT(5)
+#define B_AX_TX_OFFSET_ERR_INT_EN BIT(4)
+#define B_AX_TX_MPDU_SIZE_ZERO_INT_EN BIT(3)
+#define B_AX_TX_NXT_ERRPKTID_INT_EN BIT(2)
+#define B_AX_TX_GET_ERRPKTID_INT_EN BIT(1)
+#define B_AX_MPDU_TX_IMR_SET_V1 (B_AX_TX_GET_ERRPKTID_INT_EN | \
+ B_AX_TX_NXT_ERRPKTID_INT_EN | \
+ B_AX_TX_MPDU_SIZE_ZERO_INT_EN | \
+ B_AX_TX_HDR3_SIZE_ERR_INT_EN | \
+ B_AX_TX_ETH_TYPE_ERR_EN | \
+ B_AX_TX_NW_TYPE_ERR_EN | \
+ B_AX_TX_KSRCH_ERR_EN)
#define R_AX_MPDU_PROC 0x9C00
#define B_AX_A_ICV_ERR BIT(1)
@@ -678,6 +1536,10 @@
#define R_AX_MPDU_RX_ERR_ISR 0x9CF0
#define R_AX_MPDU_RX_ERR_IMR 0x9CF4
+#define B_AX_RPT_ERR_INT_EN BIT(3)
+#define B_AX_MHDRLEN_ERR_INT_EN BIT(1)
+#define B_AX_GETPKTID_ERR_INT_EN BIT(0)
+#define B_AX_MPDU_RX_IMR_SET_V1 B_AX_RPT_ERR_INT_EN
#define R_AX_SEC_ENG_CTRL 0x9D00
#define B_AX_TX_PARTIAL_MODE BIT(11)
@@ -698,17 +1560,37 @@
#define R_AX_SEC_CAM_ACCESS 0x9D10
#define R_AX_SEC_CAM_RDATA 0x9D14
#define R_AX_SEC_CAM_WDATA 0x9D18
+
#define R_AX_SEC_DEBUG 0x9D1C
+#define B_AX_IMR_ERROR BIT(3)
+
+#define R_AX_SEC_DEBUG1 0x9D1C
+#define B_AX_TX_TIMEOUT_SEL_MASK GENMASK(31, 30)
+#define AX_TX_TO_VAL 0x2
+
#define R_AX_SEC_TX_DEBUG 0x9D20
#define R_AX_SEC_RX_DEBUG 0x9D24
#define R_AX_SEC_TRX_PKT_CNT 0x9D28
#define R_AX_SEC_TRX_BLK_CNT 0x9D2C
+#define R_AX_SEC_ERROR_FLAG_IMR 0x9D2C
+#define B_AX_RX_HANG_IMR BIT(1)
+#define B_AX_TX_HANG_IMR BIT(0)
+
#define R_AX_SS_CTRL 0x9E10
#define B_AX_SS_INIT_DONE_1 BIT(31)
#define B_AX_SS_WARM_INIT_FLG BIT(29)
+#define B_AX_SS_NONEMPTY_SS2FINFO_EN BIT(28)
#define B_AX_SS_EN BIT(0)
+#define R_AX_SS2FINFO_PATH 0x9E50
+#define B_AX_SS_UL_REL BIT(31)
+#define B_AX_SS_REL_QUEUE_MASK GENMASK(29, 24)
+#define B_AX_SS_REL_PORT_MASK GENMASK(18, 16)
+#define B_AX_SS_DEST_QUEUE_MASK GENMASK(13, 8)
+#define SS2F_PATH_WLCPU 0x0A
+#define B_AX_SS_DEST_PORT_MASK GENMASK(2, 0)
+
#define R_AX_SS_MACID_PAUSE_0 0x9EB0
#define B_AX_SS_MACID31_0_PAUSE_SH 0
#define B_AX_SS_MACID31_0_PAUSE_MASK GENMASK(31, 0)
@@ -726,9 +1608,47 @@
#define B_AX_SS_MACID127_96_PAUSE_MASK GENMASK(31, 0)
#define R_AX_STA_SCHEDULER_ERR_IMR 0x9EF0
+#define B_AX_PLE_B_PKTID_ERR_INT_EN BIT(2)
+#define B_AX_RPT_HANG_TIMEOUT_INT_EN BIT(1)
+#define B_AX_SEARCH_HANG_TIMEOUT_INT_EN BIT(0)
+#define B_AX_STA_SCHEDULER_IMR_SET (B_AX_SEARCH_HANG_TIMEOUT_INT_EN | \
+ B_AX_RPT_HANG_TIMEOUT_INT_EN | \
+ B_AX_PLE_B_PKTID_ERR_INT_EN)
+
#define R_AX_STA_SCHEDULER_ERR_ISR 0x9EF4
#define R_AX_TXPKTCTL_ERR_IMR_ISR 0x9F1C
+#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR BIT(25)
+#define B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR BIT(24)
+#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR BIT(19)
+#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR BIT(18)
+#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR BIT(17)
+#define B_AX_TXPKTCTL_USRCTL_REINIT_ERR BIT(16)
+#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9)
+#define B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN BIT(8)
+#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3)
+#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN BIT(2)
+#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN BIT(1)
+#define B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN BIT(0)
+#define B_AX_TXPKTCTL_IMR_B0_CLR (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN)
+#define B_AX_TXPKTCTL_IMR_B1_CLR (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN)
+#define B_AX_TXPKTCTL_IMR_B0_SET (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN)
+#define B_AX_TXPKTCTL_IMR_B1_SET (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN)
+
#define R_AX_TXPKTCTL_ERR_IMR_ISR_B1 0x9F2C
#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9)
#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3)
@@ -755,6 +1675,42 @@
#define PRELD_NEXT_WND 1
#define B_AX_B0_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+#define R_AX_TXPKTCTL_B0_ERRFLAG_IMR 0x9F78
+#define B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG BIT(21)
+#define B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR BIT(20)
+#define B_AX_B0_IMR_ERR_MPDUIF_DATAERR BIT(18)
+#define B_AX_B0_IMR_ERR_MPDUINFO_RECFG BIT(16)
+#define B_AX_B0_IMR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_AX_B0_IMR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_AX_B0_IMR_ERR_USRCTL_RLSBMPLEN BIT(3)
+#define B_AX_B0_IMR_ERR_USRCTL_RDNRLSCMD BIT(2)
+#define B_AX_B0_IMR_ERR_USRCTL_NOINIT BIT(1)
+#define B_AX_B0_IMR_ERR_USRCTL_REINIT BIT(0)
+#define B_AX_TXPKTCTL_IMR_B0_CLR_V1 (B_AX_B0_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B0_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B0_IMR_ERR_USRCTL_RDNRLSCMD | \
+ B_AX_B0_IMR_ERR_USRCTL_RLSBMPLEN | \
+ B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG)
+#define B_AX_TXPKTCTL_IMR_B0_SET_V1 (B_AX_B0_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B0_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG)
+
#define R_AX_TXPKTCTL_B1_PRELD_CFG0 0x9F88
#define B_AX_B1_PRELD_FEN BIT(31)
#define B_AX_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
@@ -766,6 +1722,42 @@
#define B_AX_B1_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
#define B_AX_B1_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+#define R_AX_TXPKTCTL_B1_ERRFLAG_IMR 0x9FB8
+#define B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG BIT(21)
+#define B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR BIT(20)
+#define B_AX_B1_IMR_ERR_MPDUIF_DATAERR BIT(18)
+#define B_AX_B1_IMR_ERR_MPDUINFO_RECFG BIT(16)
+#define B_AX_B1_IMR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_AX_B1_IMR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_AX_B1_IMR_ERR_USRCTL_RLSBMPLEN BIT(3)
+#define B_AX_B1_IMR_ERR_USRCTL_RDNRLSCMD BIT(2)
+#define B_AX_B1_IMR_ERR_USRCTL_NOINIT BIT(1)
+#define B_AX_B1_IMR_ERR_USRCTL_REINIT BIT(0)
+#define B_AX_TXPKTCTL_IMR_B1_CLR_V1 (B_AX_B1_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B1_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B1_IMR_ERR_USRCTL_RDNRLSCMD | \
+ B_AX_B1_IMR_ERR_USRCTL_RLSBMPLEN | \
+ B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B1_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B1_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B1_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B1_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG)
+#define B_AX_TXPKTCTL_IMR_B1_SET_V1 (B_AX_B1_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B1_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B1_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B1_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B1_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B1_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG)
+
#define R_AX_AFE_CTRL1 0x0024
#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
@@ -810,6 +1802,10 @@
#define R_AX_WMAC_RFMOD 0xC010
#define R_AX_WMAC_RFMOD_C1 0xE010
#define B_AX_WMAC_RFMOD_MASK GENMASK(1, 0)
+#define AX_WMAC_RFMOD_20M 0
+#define AX_WMAC_RFMOD_40M 1
+#define AX_WMAC_RFMOD_80M 2
+#define AX_WMAC_RFMOD_160M 3
#define R_AX_GID_POSITION0 0xC070
#define R_AX_GID_POSITION0_C1 0xE070
@@ -830,6 +1826,20 @@
#define B_AX_TXSC_40M_MASK GENMASK(7, 4)
#define B_AX_TXSC_20M_MASK GENMASK(3, 0)
+#define R_AX_CMAC_ERR_IMR 0xC160
+#define R_AX_CMAC_ERR_IMR_C1 0xE160
+#define B_AX_WMAC_TX_ERR_IND_EN BIT(7)
+#define B_AX_WMAC_RX_ERR_IND_EN BIT(6)
+#define B_AX_TXPWR_CTRL_ERR_IND_EN BIT(5)
+#define B_AX_PHYINTF_ERR_IND_EN BIT(4)
+#define B_AX_DMA_TOP_ERR_IND_EN BIT(3)
+#define B_AX_PTCL_TOP_ERR_IND_EN BIT(1)
+#define B_AX_SCHEDULE_TOP_ERR_IND_EN BIT(0)
+#define CMAC0_ERR_IMR_EN GENMASK(31, 0)
+#define CMAC1_ERR_IMR_EN GENMASK(31, 0)
+#define CMAC0_ERR_IMR_DIS 0
+#define CMAC1_ERR_IMR_DIS 0
+
#define R_AX_CMAC_ERR_ISR 0xC164
#define R_AX_CMAC_ERR_ISR_C1 0xE164
#define B_AX_WMAC_TX_ERR_IND BIT(7)
@@ -865,6 +1875,14 @@
#define R_AX_PREBKF_CFG_0_C1 0xE338
#define B_AX_PREBKF_TIME_MASK GENMASK(4, 0)
+#define R_AX_PREBKF_CFG_1 0xC33C
+#define R_AX_PREBKF_CFG_1_C1 0xE33C
+#define B_AX_SIFS_TIMEOUT_TB_AGGR_MASK GENMASK(30, 24)
+#define B_AX_SIFS_PREBKF_MASK GENMASK(23, 16)
+#define B_AX_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
+#define B_AX_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
+#define SIFS_MACTXEN_T1 0x47
+
#define R_AX_CCA_CFG_0 0xC340
#define R_AX_CCA_CFG_0_C1 0xE340
#define B_AX_BTCCA_BRK_TXOP_EN BIT(9)
@@ -949,7 +1967,6 @@
#define R_AX_SCHEDULE_ERR_IMR 0xC3E8
#define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8
#define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1)
-#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0)
#define R_AX_SCHEDULE_ERR_ISR 0xC3EC
#define R_AX_SCHEDULE_ERR_ISR_C1 0xE3EC
@@ -964,6 +1981,10 @@
#define R_AX_SCH_DBG_C1 0xE3F8
#define B_AX_SCHEDULER_DBG_MASK GENMASK(31, 0)
+#define R_AX_SCH_EXT_CTRL 0xC3FC
+#define R_AX_SCH_EXT_CTRL_C1 0xE3FC
+#define B_AX_PORT_RST_TSF_ADV BIT(1)
+
#define R_AX_PORT_CFG_P0 0xC400
#define R_AX_PORT_CFG_P1 0xC440
#define R_AX_PORT_CFG_P2 0xC480
@@ -1124,6 +2145,18 @@
#define R_AX_PORT_HGQ_WINDOW_CFG 0xC5A0
#define R_AX_PORT_HGQ_WINDOW_CFG_C1 0xE5A0
+#define R_AX_PTCL_COMMON_SETTING_0 0xC600
+#define R_AX_PTCL_COMMON_SETTING_0_C1 0xE600
+#define B_AX_PCIE_MODE_MASK GENMASK(15, 14)
+#define B_AX_CPUMGQ_LIFETIME_EN BIT(8)
+#define B_AX_MGQ_LIFETIME_EN BIT(7)
+#define B_AX_LIFETIME_EN BIT(6)
+#define B_AX_PTCL_TRIGGER_SS_EN_UL BIT(4)
+#define B_AX_PTCL_TRIGGER_SS_EN_1 BIT(3)
+#define B_AX_PTCL_TRIGGER_SS_EN_0 BIT(2)
+#define B_AX_CMAC_TX_MODE_1 BIT(1)
+#define B_AX_CMAC_TX_MODE_0 BIT(0)
+
#define R_AX_AMPDU_AGG_LIMIT 0xC610
#define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
#define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
@@ -1168,6 +2201,18 @@
#define B_AX_PORT_DROP_4_0_MASK GENMASK(20, 16)
#define B_AX_MBSSID_DROP_15_0_MASK GENMASK(15, 0)
+#define R_AX_PTCLRPT_FULL_HDL 0xC660
+#define R_AX_PTCLRPT_FULL_HDL_C1 0xE660
+#define B_AX_RPT_LATCH_PHY_TIME_MASK GENMASK(15, 12)
+#define B_AX_F2PCMD_FWWD_RLS_MODE BIT(9)
+#define B_AX_F2PCMD_RPT_EN BIT(8)
+#define B_AX_BCN_RPT_PATH_MASK GENMASK(7, 6)
+#define B_AX_SPE_RPT_PATH_MASK GENMASK(5, 4)
+#define FWD_TO_WLCPU 1
+#define B_AX_TX_RPT_PATH_MASK GENMASK(3, 2)
+#define B_AX_F2PCMDRPT_FULL_DROP BIT(1)
+#define B_AX_NON_F2PCMDRPT_FULL_DROP BIT(0)
+
#define R_AX_BT_PLT 0xC67C
#define R_AX_BT_PLT_C1 0xE67C
#define B_AX_BT_PLT_PKT_CNT_MASK GENMASK(31, 16)
@@ -1195,8 +2240,48 @@
#define R_AX_PTCL_IMR0 0xC6C0
#define R_AX_PTCL_IMR0_C1 0xE6C0
+#define B_AX_F2PCMD_PKTID_ERR_INT_EN BIT(31)
+#define B_AX_F2PCMD_RD_PKTID_ERR_INT_EN BIT(30)
+#define B_AX_F2PCMD_ASSIGN_PKTID_ERR_INT_EN BIT(29)
#define B_AX_F2PCMD_USER_ALLC_ERR_INT_EN BIT(28)
+#define B_AX_RX_SPF_U0_PKTID_ERR_INT_EN BIT(27)
+#define B_AX_TX_SPF_U1_PKTID_ERR_INT_EN BIT(26)
+#define B_AX_TX_SPF_U2_PKTID_ERR_INT_EN BIT(25)
+#define B_AX_TX_SPF_U3_PKTID_ERR_INT_EN BIT(24)
#define B_AX_TX_RECORD_PKTID_ERR_INT_EN BIT(23)
+#define B_AX_F2PCMD_EMPTY_ERR_INT_EN BIT(15)
+#define B_AX_TWTSP_QSEL_ERR_INT_EN BIT(14)
+#define B_AX_BCNQ_ORDER_ERR_INT_EN BIT(12)
+#define B_AX_Q_PKTID_ERR_INT_EN BIT(11)
+#define B_AX_D_PKTID_ERR_INT_EN BIT(10)
+#define B_AX_TXPRT_FULL_DROP_ERR_INT_EN BIT(9)
+#define B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN BIT(8)
+#define B_AX_FSM1_TIMEOUT_ERR_INT_EN BIT(1)
+#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+#define B_AX_PTCL_IMR_CLR (B_AX_FSM_TIMEOUT_ERR_INT_EN | \
+ B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN | \
+ B_AX_TXPRT_FULL_DROP_ERR_INT_EN | \
+ B_AX_D_PKTID_ERR_INT_EN | \
+ B_AX_Q_PKTID_ERR_INT_EN | \
+ B_AX_BCNQ_ORDER_ERR_INT_EN | \
+ B_AX_TWTSP_QSEL_ERR_INT_EN | \
+ B_AX_F2PCMD_EMPTY_ERR_INT_EN | \
+ B_AX_TX_RECORD_PKTID_ERR_INT_EN | \
+ B_AX_TX_SPF_U3_PKTID_ERR_INT_EN | \
+ B_AX_TX_SPF_U2_PKTID_ERR_INT_EN | \
+ B_AX_TX_SPF_U1_PKTID_ERR_INT_EN | \
+ B_AX_RX_SPF_U0_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_USER_ALLC_ERR_INT_EN | \
+ B_AX_F2PCMD_ASSIGN_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_RD_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_PKTID_ERR_INT_EN)
+#define B_AX_PTCL_IMR_SET (B_AX_FSM_TIMEOUT_ERR_INT_EN | \
+ B_AX_TX_RECORD_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_USER_ALLC_ERR_INT_EN)
+#define B_AX_PTCL_IMR_CLR_V1 (B_AX_FSM1_TIMEOUT_ERR_INT_EN | \
+ B_AX_FSM_TIMEOUT_ERR_INT_EN)
+#define B_AX_PTCL_IMR_SET_V1 (B_AX_FSM1_TIMEOUT_ERR_INT_EN | \
+ B_AX_FSM_TIMEOUT_ERR_INT_EN)
#define R_AX_PTCL_ISR0 0xC6C4
#define R_AX_PTCL_ISR0_C1 0xE6C4
@@ -1223,10 +2308,160 @@
#define R_AX_DLE_CTRL_C1 0xE800
#define B_AX_NO_RESERVE_PAGE_ERR_IMR BIT(23)
#define B_AX_RXDATA_FSM_HANG_ERROR_IMR BIT(15)
+#define B_AX_RXSTS_FSM_HANG_ERROR_IMR BIT(14)
+#define B_AX_DLE_IMR_CLR (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \
+ B_AX_RXDATA_FSM_HANG_ERROR_IMR | \
+ B_AX_NO_RESERVE_PAGE_ERR_IMR)
+#define B_AX_DLE_IMR_SET (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \
+ B_AX_RXDATA_FSM_HANG_ERROR_IMR)
+
#define R_AX_RXDMA_PKT_INFO_0 0xC814
#define R_AX_RXDMA_PKT_INFO_1 0xC818
#define R_AX_RXDMA_PKT_INFO_2 0xC81C
+#define R_AX_RX_ERR_FLAG_IMR 0xC804
+#define R_AX_RX_ERR_FLAG_IMR_C1 0xE804
+#define B_AX_RX_GET_NULL_PKT_ERR_MSK BIT(30)
+#define B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK BIT(29)
+#define B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK BIT(28)
+#define B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK BIT(27)
+#define B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK BIT(26)
+#define B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK BIT(25)
+#define B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK BIT(24)
+#define B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK BIT(23)
+#define B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK BIT(22)
+#define B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK BIT(21)
+#define B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK BIT(20)
+#define B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK BIT(19)
+#define B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK BIT(18)
+#define B_AX_RX_RU0_ZERO_LEN_ERR_MSK BIT(17)
+#define B_AX_RX_RU1_ZERO_LEN_ERR_MSK BIT(16)
+#define B_AX_RX_RU2_ZERO_LEN_ERR_MSK BIT(15)
+#define B_AX_RX_RU3_ZERO_LEN_ERR_MSK BIT(14)
+#define B_AX_RX_RU4_ZERO_LEN_ERR_MSK BIT(13)
+#define B_AX_RX_RU5_ZERO_LEN_ERR_MSK BIT(12)
+#define B_AX_RX_RU6_ZERO_LEN_ERR_MSK BIT(11)
+#define B_AX_RX_RU7_ZERO_LEN_ERR_MSK BIT(10)
+#define B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK BIT(9)
+#define B_AX_RX_CSI_ZERO_LEN_ERR_MSK BIT(8)
+#define B_AX_PLE_DATA_OPT_FSM_HANG_MSK BIT(7)
+#define B_AX_PLE_RXDATA_REQ_BUF_FSM_HANG_MSK BIT(6)
+#define B_AX_PLE_TXRPT_REQ_BUF_FSM_HANG_MSK BIT(5)
+#define B_AX_PLE_WD_OPT_FSM_HANG_MSK BIT(4)
+#define B_AX_PLE_ENQ_FSM_HANG_MSK BIT(3)
+#define B_AX_RXDATA_ENQUE_ORDER_ERR_MSK BIT(2)
+#define B_AX_RXSTS_ENQUE_ORDER_ERR_MSK BIT(1)
+#define B_AX_RX_CSI_PKT_NUM_ERR_MSK BIT(0)
+#define B_AX_RX_ERR_IMR_CLR_V1 (B_AX_RXSTS_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RXDATA_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RX_CSI_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU7_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU6_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU5_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU4_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU3_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU2_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU1_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU0_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_GET_NULL_PKT_ERR_MSK)
+#define B_AX_RX_ERR_IMR_SET_V1 (B_AX_RXSTS_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RXDATA_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RX_CSI_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU7_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU6_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU5_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU4_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU3_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU2_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU1_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU0_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_GET_NULL_PKT_ERR_MSK)
+
+#define R_AX_TX_ERR_FLAG_IMR 0xC870
+#define R_AX_TX_ERR_FLAG_IMR_C1 0xE870
+#define B_AX_TX_RU0_FSM_HANG_ERR_MSK BIT(31)
+#define B_AX_TX_RU1_FSM_HANG_ERR_MSK BIT(30)
+#define B_AX_TX_RU2_FSM_HANG_ERR_MSK BIT(29)
+#define B_AX_TX_RU3_FSM_HANG_ERR_MSK BIT(28)
+#define B_AX_TX_RU4_FSM_HANG_ERR_MSK BIT(27)
+#define B_AX_TX_RU5_FSM_HANG_ERR_MSK BIT(26)
+#define B_AX_TX_RU6_FSM_HANG_ERR_MSK BIT(25)
+#define B_AX_TX_RU7_FSM_HANG_ERR_MSK BIT(24)
+#define B_AX_TX_RU8_FSM_HANG_ERR_MSK BIT(23)
+#define B_AX_TX_RU9_FSM_HANG_ERR_MSK BIT(22)
+#define B_AX_TX_RU10_FSM_HANG_ERR_MSK BIT(21)
+#define B_AX_TX_RU11_FSM_HANG_ERR_MSK BIT(20)
+#define B_AX_TX_RU12_FSM_HANG_ERR_MSK BIT(19)
+#define B_AX_TX_RU13_FSM_HANG_ERR_MSK BIT(18)
+#define B_AX_TX_RU14_FSM_HANG_ERR_MSK BIT(17)
+#define B_AX_TX_RU15_FSM_HANG_ERR_MSK BIT(16)
+#define B_AX_TX_CSI_FSM_HANG_ERR_MSK BIT(15)
+#define B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK BIT(14)
+#define B_AX_TX_ERR_IMR_CLR_V1 (B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK | \
+ B_AX_TX_CSI_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU7_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU6_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU5_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU4_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU3_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU2_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU1_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU0_FSM_HANG_ERR_MSK)
+#define B_AX_TX_ERR_IMR_SET_V1 (B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK | \
+ B_AX_TX_CSI_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU7_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU6_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU5_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU4_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU3_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU2_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU1_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU0_FSM_HANG_ERR_MSK)
+
+#define R_AX_TCR0 0xCA00
+#define R_AX_TCR0_C1 0xEA00
+#define B_AX_TCR_ZLD_NUM_MASK GENMASK(31, 24)
+#define B_AX_TCR_UDF_EN BIT(23)
+#define B_AX_TCR_UDF_THSD_MASK GENMASK(22, 16)
+#define TCR_UDF_THSD 0x6
+#define B_AX_TCR_ERRSTEN_MASK GENMASK(15, 10)
+#define B_AX_TCR_VHTSIGA1_TXPS BIT(9)
+#define B_AX_TCR_PLCP_ERRHDL_EN BIT(8)
+#define B_AX_TCR_PADSEL BIT(7)
+#define B_AX_TCR_MASK_SIGBCRC BIT(6)
+#define B_AX_TCR_SR_VAL15_ALLOW BIT(5)
+#define B_AX_TCR_EN_EOF BIT(4)
+#define B_AX_TCR_EN_SCRAM_INC BIT(3)
+#define B_AX_TCR_EN_20MST BIT(2)
+#define B_AX_TCR_CRC BIT(1)
+#define B_AX_TCR_DISGCLK BIT(0)
+
#define R_AX_TCR1 0xCA04
#define R_AX_TCR1_C1 0xEA04
#define B_AX_TXDFIFO_THRESHOLD GENMASK(31, 28)
@@ -1250,6 +2485,17 @@
#define R_AX_PPWRBIT_SETTING 0xCA0C
#define R_AX_PPWRBIT_SETTING_C1 0xEA0C
+#define R_AX_TXD_FIFO_CTRL 0xCA1C
+#define R_AX_TXD_FIFO_CTRL_C1 0xEA1C
+#define B_AX_NON_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(28, 24)
+#define B_AX_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(20, 16)
+#define B_AX_TXDFIFO_HIGH_MCS_THRE_MASK GENMASK(15, 12)
+#define TXDFIFO_HIGH_MCS_THRE 0x7
+#define B_AX_TXDFIFO_LOW_MCS_THRE_MASK GENMASK(11, 8)
+#define TXDFIFO_LOW_MCS_THRE 0x7
+#define B_AX_HIGH_MCS_PHY_RATE_MASK GENMASK(7, 4)
+#define B_AX_BW_PHY_RATE_MASK GENMASK(1, 0)
+
#define R_AX_MACTX_DBG_SEL_CNT 0xCA20
#define R_AX_MACTX_DBG_SEL_CNT_C1 0xEA20
#define B_AX_MACTX_MPDU_CNT GENMASK(31, 24)
@@ -1311,6 +2557,16 @@
#define R_AX_MAC_LOOPBACK_C1 0xEC20
#define B_AX_MACLBK_EN BIT(0)
+#define R_AX_WMAC_NAV_CTL 0xCC80
+#define R_AX_WMAC_NAV_CTL_C1 0xEC80
+#define B_AX_WMAC_NAV_UPPER_EN BIT(26)
+#define B_AX_WMAC_0P125US_TIMER_MASK GENMASK(25, 18)
+#define B_AX_WMAC_PLCP_UP_NAV_EN BIT(17)
+#define B_AX_WMAC_TF_UP_NAV_EN BIT(16)
+#define B_AX_WMAC_NAV_UPPER_MASK GENMASK(15, 8)
+#define NAV_12MS 0xBC
+#define B_AX_WMAC_RTS_RST_DUR_MASK GENMASK(7, 0)
+
#define R_AX_RXTRIG_TEST_USER_2 0xCCB0
#define R_AX_RXTRIG_TEST_USER_2_C1 0xECB0
#define B_AX_RXTRIG_MACID_MASK GENMASK(31, 24)
@@ -1320,6 +2576,38 @@
#define B_AX_RXTRIG_EN BIT(16)
#define B_AX_RXTRIG_USERINFO_2_MASK GENMASK(15, 0)
+#define R_AX_TRXPTCL_ERROR_INDICA_MASK 0xCCBC
+#define R_AX_TRXPTCL_ERROR_INDICA_MASK_C1 0xECBC
+#define B_AX_WMAC_MODE BIT(22)
+#define B_AX_WMAC_TIMETOUT_THR_MASK GENMASK(21, 16)
+#define B_AX_RMAC_FTM BIT(8)
+#define B_AX_RMAC_CSI BIT(7)
+#define B_AX_TMAC_MIMO_CTRL BIT(6)
+#define B_AX_TMAC_RXTB BIT(5)
+#define B_AX_TMAC_HWSIGB_GEN BIT(4)
+#define B_AX_TMAC_TXPLCP BIT(3)
+#define B_AX_TMAC_RESP BIT(2)
+#define B_AX_TMAC_TXCTL BIT(1)
+#define B_AX_TMAC_MACTX BIT(0)
+#define B_AX_TMAC_IMR_CLR_V1 (B_AX_TMAC_MACTX | \
+ B_AX_TMAC_TXCTL | \
+ B_AX_TMAC_RESP | \
+ B_AX_TMAC_TXPLCP | \
+ B_AX_TMAC_HWSIGB_GEN | \
+ B_AX_TMAC_RXTB | \
+ B_AX_TMAC_MIMO_CTRL | \
+ B_AX_RMAC_CSI | \
+ B_AX_RMAC_FTM)
+#define B_AX_TMAC_IMR_SET_V1 (B_AX_TMAC_MACTX | \
+ B_AX_TMAC_TXCTL | \
+ B_AX_TMAC_RESP | \
+ B_AX_TMAC_TXPLCP | \
+ B_AX_TMAC_HWSIGB_GEN | \
+ B_AX_TMAC_RXTB | \
+ B_AX_TMAC_MIMO_CTRL | \
+ B_AX_RMAC_CSI | \
+ B_AX_RMAC_FTM)
+
#define R_AX_WMAC_TX_TF_INFO_0 0xCCD0
#define R_AX_WMAC_TX_TF_INFO_0_C1 0xECD0
#define B_AX_WMAC_TX_TF_INFO_SEL_MASK GENMASK(2, 0)
@@ -1334,11 +2622,55 @@
#define R_AX_TMAC_ERR_IMR_ISR 0xCCEC
#define R_AX_TMAC_ERR_IMR_ISR_C1 0xECEC
+#define B_AX_TMAC_TXPLCP_ERR_CLR BIT(19)
+#define B_AX_TMAC_RESP_ERR_CLR BIT(18)
+#define B_AX_TMAC_TXCTL_ERR_CLR BIT(17)
+#define B_AX_TMAC_MACTX_ERR_CLR BIT(16)
+#define B_AX_TMAC_TXPLCP_ERR BIT(14)
+#define B_AX_TMAC_RESP_ERR BIT(13)
+#define B_AX_TMAC_TXCTL_ERR BIT(12)
+#define B_AX_TMAC_MACTX_ERR BIT(11)
+#define B_AX_TMAC_TXPLCP_INT_EN BIT(10)
+#define B_AX_TMAC_RESP_INT_EN BIT(9)
+#define B_AX_TMAC_TXCTL_INT_EN BIT(8)
+#define B_AX_TMAC_MACTX_INT_EN BIT(7)
+#define B_AX_WMAC_INT_MODE BIT(6)
+#define B_AX_TMAC_TIMETOUT_THR_MASK GENMASK(5, 0)
+#define B_AX_TMAC_IMR_CLR (B_AX_TMAC_MACTX_INT_EN | \
+ B_AX_TMAC_TXCTL_INT_EN | \
+ B_AX_TMAC_RESP_INT_EN | \
+ B_AX_TMAC_TXPLCP_INT_EN)
+#define B_AX_TMAC_IMR_SET (B_AX_TMAC_MACTX_INT_EN | \
+ B_AX_TMAC_TXCTL_INT_EN | \
+ B_AX_TMAC_RESP_INT_EN | \
+ B_AX_TMAC_TXPLCP_INT_EN)
#define R_AX_DBGSEL_TRXPTCL 0xCCF4
#define R_AX_DBGSEL_TRXPTCL_C1 0xECF4
#define B_AX_DBGSEL_TRXPTCL_MASK GENMASK(7, 0)
+#define R_AX_PHYINFO_ERR_IMR_V1 0xCCF8
+#define R_AX_PHYINFO_ERR_IMR_V1_C1 0xECF8
+#define B_AX_PHYINTF_TIMEOUT_THR_MSAK_V1 GENMASK(21, 16)
+#define B_AX_CSI_ON_TIMEOUT_EN BIT(5)
+#define B_AX_STS_ON_TIMEOUT_EN BIT(4)
+#define B_AX_DATA_ON_TIMEOUT_EN BIT(3)
+#define B_AX_OFDM_CCA_TIMEOUT_EN BIT(2)
+#define B_AX_CCK_CCA_TIMEOUT_EN BIT(1)
+#define B_AX_PHY_TXON_TIMEOUT_EN BIT(0)
+#define B_AX_PHYINFO_IMR_CLR_V1 (B_AX_PHY_TXON_TIMEOUT_EN | \
+ B_AX_CCK_CCA_TIMEOUT_EN | \
+ B_AX_OFDM_CCA_TIMEOUT_EN | \
+ B_AX_DATA_ON_TIMEOUT_EN | \
+ B_AX_STS_ON_TIMEOUT_EN | \
+ B_AX_CSI_ON_TIMEOUT_EN)
+#define B_AX_PHYINFO_IMR_SET_V1 (B_AX_PHY_TXON_TIMEOUT_EN | \
+ B_AX_CCK_CCA_TIMEOUT_EN | \
+ B_AX_OFDM_CCA_TIMEOUT_EN | \
+ B_AX_DATA_ON_TIMEOUT_EN | \
+ B_AX_STS_ON_TIMEOUT_EN | \
+ B_AX_CSI_ON_TIMEOUT_EN)
+
#define R_AX_PHYINFO_ERR_IMR 0xCCFC
#define R_AX_PHYINFO_ERR_IMR_C1 0xECFC
#define B_AX_CSI_ON_TIMEOUT BIT(29)
@@ -1354,6 +2686,12 @@
#define B_AX_CCK_CCA_TIMEOUT_INT_EN BIT(17)
#define B_AX_PHY_TXON_TIMEOUT_INT_EN BIT(16)
#define B_AX_PHYINTF_TIMEOUT_THR_MSAK GENMASK(5, 0)
+#define B_AX_PHYINFO_IMR_EN_ALL (B_AX_PHY_TXON_TIMEOUT_INT_EN | \
+ B_AX_CCK_CCA_TIMEOUT_INT_EN | \
+ B_AX_OFDM_CCA_TIMEOUT_INT_EN | \
+ B_AX_DATA_ON_TIMEOUT_INT_EN | \
+ B_AX_STS_ON_TIMEOUT_INT_EN | \
+ B_AX_CSI_ON_TIMEOUT_INT_EN)
#define R_AX_PHYINFO_ERR_ISR 0xCCFC
#define R_AX_PHYINFO_ERR_ISR_C1 0xECFC
@@ -1487,6 +2825,8 @@
#define R_AX_RESPBA_CAM_CTRL 0xCE3C
#define R_AX_RESPBA_CAM_CTRL_C1 0xEE3C
#define B_AX_SSN_SEL BIT(2)
+#define B_AX_BACAM_RST_MASK GENMASK(1, 0)
+#define S_AX_BACAM_RST_ALL 2
#define R_AX_PPDU_STAT 0xCE40
#define R_AX_PPDU_STAT_C1 0xEE40
@@ -1529,6 +2869,51 @@
#define B_AX_BMAC_DMA_TIMEOUT_FLAG BIT(2)
#define B_AX_BMAC_DATA_ON_TO_IDLE_TIMEOUT_FLAG BIT(1)
#define B_AX_BMAC_CCA_TO_IDLE_TIMEOUT_FLAG BIT(0)
+#define B_AX_RMAC_IMR_CLR (B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN | \
+ B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN | \
+ B_AX_RMAC_DMA_TIMEOUT_INT_EN | \
+ B_AX_RMAC_CCA_TIMEOUT_INT_EN | \
+ B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN | \
+ B_AX_RMAC_CSI_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN)
+#define B_AX_RMAC_IMR_SET (B_AX_RMAC_DMA_TIMEOUT_INT_EN | \
+ B_AX_RMAC_CSI_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN)
+
+#define R_AX_RX_ERR_IMR 0xCEF8
+#define R_AX_RX_ERR_IMR_C1 0xEEF8
+#define B_AX_RX_ERR_TRIG_ACT_TO_MSK BIT(9)
+#define B_AX_RX_ERR_STS_ACT_TO_MSK BIT(8)
+#define B_AX_RX_ERR_CSI_ACT_TO_MSK BIT(7)
+#define B_AX_RX_ERR_ACT_TO_MSK BIT(6)
+#define B_AX_CSI_DATAON_ASSERT_TO_MSK BIT(5)
+#define B_AX_DATAON_ASSERT_TO_MSK BIT(4)
+#define B_AX_CCA_ASSERT_TO_MSK BIT(3)
+#define B_AX_RX_ERR_DMA_TO_MSK BIT(2)
+#define B_AX_RX_ERR_DATA_TO_MSK BIT(1)
+#define B_AX_RX_ERR_CCA_TO_MSK BIT(0)
+#define B_AX_RMAC_IMR_CLR_V1 (B_AX_RX_ERR_CCA_TO_MSK | \
+ B_AX_RX_ERR_DATA_TO_MSK | \
+ B_AX_RX_ERR_DMA_TO_MSK | \
+ B_AX_CCA_ASSERT_TO_MSK | \
+ B_AX_DATAON_ASSERT_TO_MSK | \
+ B_AX_CSI_DATAON_ASSERT_TO_MSK | \
+ B_AX_RX_ERR_ACT_TO_MSK | \
+ B_AX_RX_ERR_CSI_ACT_TO_MSK | \
+ B_AX_RX_ERR_STS_ACT_TO_MSK | \
+ B_AX_RX_ERR_TRIG_ACT_TO_MSK)
+#define B_AX_RMAC_IMR_SET_V1 (B_AX_RX_ERR_CCA_TO_MSK | \
+ B_AX_RX_ERR_DATA_TO_MSK | \
+ B_AX_RX_ERR_DMA_TO_MSK | \
+ B_AX_CCA_ASSERT_TO_MSK | \
+ B_AX_DATAON_ASSERT_TO_MSK | \
+ B_AX_CSI_DATAON_ASSERT_TO_MSK | \
+ B_AX_RX_ERR_ACT_TO_MSK | \
+ B_AX_RX_ERR_CSI_ACT_TO_MSK | \
+ B_AX_RX_ERR_STS_ACT_TO_MSK | \
+ B_AX_RX_ERR_TRIG_ACT_TO_MSK)
#define R_AX_RMAC_PLCP_MON 0xCEF8
#define R_AX_RMAC_PLCP_MON_C1 0xEEF8
@@ -1577,22 +2962,104 @@
#define R_AX_PWR_MACID_LMT_TABLE0 0xD36C
#define R_AX_PWR_MACID_LMT_TABLE127 0xD568
+#define R_AX_PATH_COM0 0xD800
+#define AX_PATH_COM0_DFVAL 0x00000000
+#define AX_PATH_COM0_PATHA 0x08888880
+#define AX_PATH_COM0_PATHB 0x11111100
+#define AX_PATH_COM0_PATHAB 0x19999980
+#define R_AX_PATH_COM1 0xD804
+#define AX_PATH_COM1_DFVAL 0x00000000
+#define AX_PATH_COM1_PATHA 0x11111111
+#define AX_PATH_COM1_PATHB 0x22222222
+#define AX_PATH_COM1_PATHAB 0x33333333
+#define R_AX_PATH_COM2 0xD808
+#define AX_PATH_COM2_DFVAL 0x00000000
+#define AX_PATH_COM2_PATHA 0x01209111
+#define AX_PATH_COM2_PATHB 0x01209222
+#define AX_PATH_COM2_PATHAB 0x01209333
+#define R_AX_PATH_COM3 0xD80C
+#define AX_PATH_COM3_DFVAL 0x49249249
+#define R_AX_PATH_COM4 0xD810
+#define AX_PATH_COM4_DFVAL 0x1C9C9C49
+#define R_AX_PATH_COM5 0xD814
+#define AX_PATH_COM5_DFVAL 0x39393939
+#define R_AX_PATH_COM6 0xD818
+#define AX_PATH_COM6_DFVAL 0x39393939
+#define R_AX_PATH_COM7 0xD81C
+#define AX_PATH_COM7_DFVAL 0x39393939
+#define AX_PATH_COM7_PATHA 0x39393939
+#define AX_PATH_COM7_PATHB 0x39383939
+#define AX_PATH_COM7_PATHAB 0x39393939
+#define R_AX_PATH_COM8 0xD820
+#define AX_PATH_COM8_DFVAL 0x00000000
+#define AX_PATH_COM8_PATHA 0x00003939
+#define AX_PATH_COM8_PATHB 0x00003938
+#define AX_PATH_COM8_PATHAB 0x00003939
+#define R_AX_PATH_COM9 0xD824
+#define AX_PATH_COM9_DFVAL 0x000007C0
+#define R_AX_PATH_COM10 0xD828
+#define AX_PATH_COM10_DFVAL 0xE0000000
+#define R_AX_PATH_COM11 0xD82C
+#define AX_PATH_COM11_DFVAL 0x00000000
+#define R_P80_AT_HIGH_FREQ_BB_WRP 0xD848
+#define B_P80_AT_HIGH_FREQ_BB_WRP BIT(28)
+#define R_AX_TSSI_CTRL_HEAD 0xD908
+#define R_AX_BANDEDGE_CFG 0xD94C
+#define B_AX_BANDEDGE_CFG_IDX_MASK GENMASK(31, 30)
+#define R_AX_TSSI_CTRL_TAIL 0xD95C
+
#define R_AX_TXPWR_IMR 0xD9E0
#define R_AX_TXPWR_IMR_C1 0xF9E0
#define R_AX_TXPWR_ISR 0xD9E4
#define R_AX_TXPWR_ISR_C1 0xF9E4
#define R_AX_BTC_CFG 0xDA00
+#define B_AX_BTC_EN BIT(31)
+#define B_AX_EN_EXT_BT_PINMUX BIT(29)
+#define B_AX_BTC_RST BIT(28)
+#define B_AX_BTC_DBG_SRC_SEL BIT(27)
+#define B_AX_BTC_MODE_MASK GENMASK(25, 24)
+#define B_AX_INV_WL_ACT2 BIT(17)
+#define B_AX_BTG_LNA1_GAIN_SEL BIT(16)
+#define B_AX_COEX_DLY_CLK_MASK GENMASK(15, 8)
+#define B_AX_IGN_GNT_BT2_RX BIT(7)
+#define B_AX_IGN_GNT_BT2_TX BIT(6)
+#define B_AX_IGN_GNT_BT2 BIT(5)
+#define B_AX_BTC_DBG_SEL_MASK GENMASK(4, 3)
#define B_AX_DIS_BTC_CLK_G BIT(2)
+#define B_AX_GNT_WL_RX_CTRL BIT(1)
+#define B_AX_WL_SRC BIT(0)
+
+#define R_AX_RTK_MODE_CFG_V1 0xDA04
+#define R_AX_RTK_MODE_CFG_V1_C1 0xFA04
+#define B_AX_BT_BLE_EN_V1 BIT(24)
+#define B_AX_BT_ULTRA_EN BIT(16)
+#define B_AX_BT_L_RX_ULTRA_MASK GENMASK(15, 14)
+#define B_AX_BT_L_TX_ULTRA_MASK GENMASK(13, 12)
+#define B_AX_BT_H_RX_ULTRA_MASK GENMASK(11, 10)
+#define B_AX_BT_H_TX_ULTRA_MASK GENMASK(9, 8)
+#define B_AX_SAMPLE_CLK_MASK GENMASK(7, 0)
#define R_AX_WL_PRI_MSK 0xDA10
#define B_AX_PTA_WL_PRI_MASK_BCNQ BIT(8)
+#define R_AX_BT_CNT_CFG 0xDA10
+#define R_AX_BT_CNT_CFG_C1 0xFA10
+#define B_AX_BT_CNT_RST_V1 BIT(1)
+#define B_AX_BT_CNT_EN BIT(0)
+
+#define R_BTC_BT_CNT_HIGH 0xDA14
+#define R_BTC_BT_CNT_LOW 0xDA18
+
#define R_AX_BTC_FUNC_EN 0xDA20
#define R_AX_BTC_FUNC_EN_C1 0xFA20
#define B_AX_PTA_WL_TX_EN BIT(1)
#define B_AX_PTA_EDCCA_EN BIT(0)
+#define R_BTC_COEX_WL_REQ 0xDA24
+#define B_BTC_TX_BCN_HI BIT(22)
+#define B_BTC_RSP_ACK_HI BIT(10)
+
#define R_BTC_BREAK_TABLE 0xDA2C
#define BTC_BREAK_PARAM 0xf0ffffff
@@ -1620,6 +3087,8 @@
#define B_AX_WL_ACT_MASK_ENABLE BIT(1)
#define B_AX_ENHANCED_BT BIT(0)
+#define R_AX_BT_BREAK_TABLE 0xDA44
+
#define R_AX_BT_STAST_HIGH 0xDA44
#define B_AX_STATIS_BT_HI_RX_MASK GENMASK(31, 16)
#define B_AX_STATIS_BT_HI_TX_MASK GENMASK(15, 0)
@@ -1674,6 +3143,9 @@
#define R_AX_LTE_WDATA 0xDAF4
#define R_AX_LTE_RDATA 0xDAF8
+#define R_AX_MACID_ANT_TABLE 0xDC00
+#define R_AX_MACID_ANT_TABLE_LAST 0xDDFC
+
#define CMAC1_START_ADDR 0xE000
#define CMAC1_END_ADDR 0xFFFF
#define R_AX_CMAC_REG_END 0xFFFF
@@ -1719,6 +3191,7 @@
#define B_AX_GNT_BT_TX_SW_CTRL BIT(0)
#define RR_MOD 0x00
+#define RR_MOD_V1 0x10000
#define RR_MOD_IQK GENMASK(19, 4)
#define RR_MOD_DPK GENMASK(19, 5)
#define RR_MOD_MASK GENMASK(19, 16)
@@ -1730,6 +3203,7 @@
#define RR_MOD_V_DPK 0x5
#define RR_MOD_V_RXK1 0x6
#define RR_MOD_V_RXK2 0x7
+#define RR_MOD_NBW GENMASK(15, 14)
#define RR_MOD_M_RXG GENMASK(13, 4)
#define RR_MOD_M_RXBB GENMASK(9, 5)
#define RR_MODOPT 0x01
@@ -1738,9 +3212,38 @@
#define RR_WLSEL_AG GENMASK(18, 16)
#define RR_RSV1 0x05
#define RR_RSV1_RST BIT(0)
+#define RR_BBDC 0x10005
+#define RR_BBDC_SEL BIT(0)
#define RR_DTXLOK 0x08
#define RR_RSV2 0x09
+#define RR_LOKVB 0x0a
+#define RR_LOKVB_COI GENMASK(19, 14)
+#define RR_LOKVB_COQ GENMASK(9, 4)
+#define RR_TXIG 0x11
+#define RR_TXIG_TG GENMASK(16, 12)
+#define RR_TXIG_GR1 GENMASK(6, 4)
+#define RR_TXIG_GR0 GENMASK(1, 0)
+#define RR_CHTR 0x17
+#define RR_CHTR_MOD GENMASK(11, 10)
+#define RR_CHTR_TXRX GENMASK(9, 0)
#define RR_CFGCH 0x18
+#define RR_CFGCH_V1 0x10018
+#define RR_CFGCH_BAND1 GENMASK(17, 16)
+#define CFGCH_BAND1_2G 0
+#define CFGCH_BAND1_5G 1
+#define CFGCH_BAND1_6G 3
+#define RR_CFGCH_BAND0 GENMASK(9, 8)
+#define CFGCH_BAND0_2G 0
+#define CFGCH_BAND0_5G 1
+#define CFGCH_BAND0_6G 0
+#define RR_CFGCH_BW GENMASK(11, 10)
+#define RR_CFGCH_CH GENMASK(7, 0)
+#define CFGCH_BW_20M 3
+#define CFGCH_BW_40M 2
+#define CFGCH_BW_80M 1
+#define CFGCH_BW_160M 0
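/*
 * Illustrative composition (sketch, not part of the patch): using the kernel's
 * FIELD_PREP() on the masks above, a CFGCH word for an 80 MHz channel on the
 * 5 GHz band could be assembled as
 *     FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
 *     FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G) |
 *     FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M) |
 *     FIELD_PREP(RR_CFGCH_CH, channel);
 * how and where the result is actually written to the RF register is
 * driver-specific and not shown here.
 */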
+#define RR_APK 0x19
+#define RR_APK_MOD GENMASK(5, 4)
#define RR_BTC 0x1a
#define RR_BTC_TXBB GENMASK(14, 12)
#define RR_BTC_RXBB GENMASK(11, 10)
@@ -1753,14 +3256,18 @@
#define RR_RXKPLL_OFF GENMASK(5, 0)
#define RR_RXKPLL_POW BIT(19)
#define RR_RSV4 0x1f
+#define RR_RSV4_AGH GENMASK(17, 16)
+#define RR_RSV4_PLLCH GENMASK(9, 0)
#define RR_RXK 0x20
-#define RR_RXK_PLLEN BIT(5)
-#define RR_RXK_SEL5G BIT(7)
#define RR_RXK_SEL2G BIT(8)
+#define RR_RXK_SEL5G BIT(7)
+#define RR_RXK_PLLEN BIT(5)
#define RR_LUTWA 0x33
#define RR_LUTWA_MASK GENMASK(9, 0)
+#define RR_LUTWA_M2 GENMASK(4, 0)
#define RR_LUTWD1 0x3e
#define RR_LUTWD0 0x3f
+#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
#define RR_TM_VAL GENMASK(6, 1)
@@ -1773,8 +3280,9 @@
#define RR_TXG2_ATT0 BIT(11)
#define RR_BSPAD 0x54
#define RR_TXGA 0x55
-#define RR_TXGA_LOK_EN BIT(0)
#define RR_TXGA_TRK_EN BIT(7)
+#define RR_TXGA_LOK_EXT GENMASK(4, 0)
+#define RR_TXGA_LOK_EN BIT(0)
#define RR_GAINTX 0x56
#define RR_GAINTX_ALL GENMASK(15, 0)
#define RR_GAINTX_PAD GENMASK(9, 5)
@@ -1797,52 +3305,84 @@
#define RR_BIASA2 0x63
#define RR_BIASA2_LB GENMASK(4, 2)
#define RR_TXATANK 0x64
+#define RR_TXATANK_LBSW2 GENMASK(17, 15)
#define RR_TXATANK_LBSW GENMASK(16, 15)
+#define RR_TXA2 0x65
+#define RR_TXA2_LDO GENMASK(19, 16)
#define RR_TRXIQ 0x66
#define RR_RSV6 0x6d
#define RR_TXPOW 0x7f
-#define RR_TXPOW_TXG BIT(1)
#define RR_TXPOW_TXA BIT(8)
+#define RR_TXPOW_TXAS BIT(7)
+#define RR_TXPOW_TXG BIT(1)
#define RR_RXPOW 0x80
#define RR_RXPOW_IQK GENMASK(17, 16)
#define RR_RXBB 0x83
+#define RR_RXBB_VOBUF GENMASK(15, 12)
#define RR_RXBB_C2G GENMASK(16, 10)
#define RR_RXBB_C1G GENMASK(9, 8)
#define RR_RXBB_ATTR GENMASK(7, 4)
#define RR_RXBB_ATTC GENMASK(2, 0)
+#define RR_RXG 0x84
+#define RR_RXG_IQKMOD GENMASK(19, 16)
#define RR_XGLNA2 0x85
#define RR_XGLNA2_SW GENMASK(1, 0)
+#define RR_RXAE 0x89
+#define RR_RXAE_IQKMOD GENMASK(3, 0)
#define RR_RXA 0x8a
#define RR_RXA_DPK GENMASK(9, 8)
#define RR_RXA2 0x8c
-#define RR_RXA2_C2 GENMASK(9, 3)
#define RR_RXA2_C1 GENMASK(12, 10)
+#define RR_RXA2_C2 GENMASK(9, 3)
+#define RR_RXA2_IATT GENMASK(7, 4)
+#define RR_RXA2_ATT GENMASK(3, 0)
#define RR_RXIQGEN 0x8d
#define RR_RXIQGEN_ATTL GENMASK(12, 8)
#define RR_RXIQGEN_ATTH GENMASK(14, 13)
#define RR_RXBB2 0x8f
-#define RR_EN_TIA_IDA GENMASK(11, 10)
#define RR_RXBB2_DAC_EN BIT(13)
+#define RR_RXBB2_CKT BIT(12)
+#define RR_EN_TIA_IDA GENMASK(11, 10)
+#define RR_RXBB2_IDAC GENMASK(11, 9)
+#define RR_RXBB2_EBW GENMASK(6, 5)
#define RR_XALNA2 0x90
#define RR_XALNA2_SW GENMASK(1, 0)
#define RR_DCK 0x92
+#define RR_DCK_DONE GENMASK(7, 5)
#define RR_DCK_FINE BIT(1)
#define RR_DCK_LV BIT(0)
#define RR_DCK1 0x93
+#define RR_DCK1_CLR GENMASK(3, 0)
#define RR_DCK1_SEL BIT(3)
#define RR_DCK2 0x94
#define RR_DCK2_CYCLE GENMASK(7, 2)
+#define RR_DCKC 0x95
+#define RR_DCKC_CHK BIT(3)
+#define RR_IQGEN 0x97
+#define RR_IQGEN_BIAS GENMASK(11, 8)
+#define RR_TXIQK 0x98
+#define RR_TXIQK_ATT2 GENMASK(15, 12)
+#define RR_TIA 0x9e
+#define RR_TIA_N6 BIT(8)
#define RR_MIXER 0x9f
#define RR_MIXER_GN GENMASK(4, 3)
+#define RR_LOGEN 0xa3
+#define RR_LOGEN_RPT GENMASK(19, 16)
#define RR_XTALX2 0xb8
#define RR_MALSEL 0xbe
+#define RR_LCK_TRG 0xd3
+#define RR_LCK_TRGSEL BIT(8)
+#define RR_IQKPLL 0xdc
+#define RR_IQKPLL_MOD GENMASK(9, 8)
#define RR_RCKD 0xde
#define RR_RCKD_POW GENMASK(19, 13)
#define RR_RCKD_BW BIT(2)
#define RR_TXADBG 0xde
#define RR_LUTDBG 0xdf
+#define RR_LUTDBG_TIA BIT(12)
#define RR_LUTDBG_LOK BIT(2)
#define RR_LUTWE2 0xee
+#define RR_LUTWE2_RTXBW BIT(2)
#define RR_LUTWE 0xef
#define RR_LUTWE_LOK BIT(2)
#define RR_RFC 0xf0
@@ -1863,6 +3403,10 @@
#define B_ANAPAR_FLTRST BIT(22)
#define B_ANAPAR_CRXBB GENMASK(18, 16)
#define B_ANAPAR_14 GENMASK(15, 0)
+#define R_RFE_E_A2 0x0334
+#define R_RFE_O_SEL_A2 0x0338
+#define R_RFE_SEL0_A2 0x033C
+#define R_RFE_SEL32_A2 0x0340
#define R_SWSI_DATA_V1 0x0370
#define B_SWSI_DATA_VAL_V1 GENMASK(19, 0)
#define B_SWSI_DATA_ADDR_V1 GENMASK(27, 20)
@@ -1875,8 +3419,9 @@
#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
#define R_UPD_CLK_ADC 0x0700
-#define B_UPD_CLK_ADC_ON BIT(24)
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
+#define B_UPD_CLK_ADC_ON BIT(24)
+#define B_ENABLE_CCK BIT(5)
#define R_RSTB_ASYNC 0x0704
#define B_RSTB_ASYNC_ALL BIT(1)
#define R_MAC_PIN_SEL 0x0734
@@ -1912,9 +3457,10 @@
#define R_PMAC_RXMOD 0x0994
#define B_PMAC_RXMOD_MSK GENMASK(7, 4)
#define R_MAC_SEL 0x09A4
-#define B_MAC_SEL_MOD GENMASK(4, 2)
-#define B_MAC_SEL_DPD_EN BIT(10)
+#define B_MAC_SEL_OFDM_TRI_FILTER BIT(31)
#define B_MAC_SEL_PWR_EN BIT(16)
+#define B_MAC_SEL_DPD_EN BIT(10)
+#define B_MAC_SEL_MOD GENMASK(4, 2)
#define R_PMAC_TX_CTRL 0x09C0
#define B_PMAC_TXEN_DIS BIT(0)
#define R_PMAC_TX_PRD 0x09C4
@@ -1923,6 +3469,10 @@
#define B_PMAC_PTX_EN BIT(4)
#define R_PMAC_TX_CNT 0x09C8
#define B_PMAC_TX_CNT_MSK GENMASK(31, 0)
+#define R_P80_AT_HIGH_FREQ 0x09D8
+#define B_P80_AT_HIGH_FREQ BIT(26)
+#define R_DBCC_80P80_SEL_EVM_RPT 0x0A10
+#define B_DBCC_80P80_SEL_EVM_RPT_EN BIT(0)
#define R_CCX 0x0C00
#define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4)
#define B_MEASUREMENT_TRIG_MSK BIT(2)
@@ -1953,8 +3503,24 @@
#define B_PD_HIT_DIS BIT(9)
#define R_IOQ_IQK_DPK 0x0C60
#define B_IOQ_IQK_DPK_EN BIT(1)
+#define R_GNT_BT_WGT_EN 0x0C6C
+#define B_GNT_BT_WGT_EN BIT(21)
+#define R_PD_ARBITER_OFF 0x0C80
+#define B_PD_ARBITER_OFF BIT(31)
+#define R_SNDCCA_A1 0x0C9C
+#define B_SNDCCA_A1_EN GENMASK(19, 12)
+#define R_SNDCCA_A2 0x0CA0
+#define B_SNDCCA_A2_VAL GENMASK(19, 12)
+#define R_RXHT_MCS_LIMIT 0x0D18
+#define B_RXHT_MCS_LIMIT GENMASK(9, 8)
+#define R_RXVHT_MCS_LIMIT 0x0D18
+#define B_RXVHT_MCS_LIMIT GENMASK(22, 21)
#define R_P0_EN_SOUND_WO_NDP 0x0D7C
#define B_P0_EN_SOUND_WO_NDP BIT(1)
+#define R_RXHE 0x0D80
+#define B_RXHETB_MAX_NSS GENMASK(25, 23)
+#define B_RXHE_MAX_NSS GENMASK(16, 14)
+#define B_RXHE_USER_MAX GENMASK(13, 6)
#define R_SPOOF_ASYNC_RST 0x0D84
#define B_SPOOF_ASYNC_RST BIT(15)
#define R_NDP_BRK0 0xDA0
@@ -1963,10 +3529,15 @@
#define R_BRK_ASYNC_RST_EN_1 0x0DC0
#define R_BRK_ASYNC_RST_EN_2 0x0DC4
#define R_BRK_ASYNC_RST_EN_3 0x0DC8
+#define R_S0_HW_SI_DIS 0x1200
+#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P0_RXCK 0x12A0
-#define B_P0_RXCK_VAL GENMASK(18, 16)
-#define B_P0_RXCK_ON BIT(19)
#define B_P0_RXCK_BW3 BIT(30)
+#define B_P0_TXCK_ALL GENMASK(19, 12)
+#define B_P0_RXCK_ON BIT(19)
+#define B_P0_RXCK_VAL GENMASK(18, 16)
+#define B_P0_TXCK_ON BIT(15)
+#define B_P0_TXCK_VAL GENMASK(14, 12)
#define R_P0_NRBW 0x12B8
#define B_P0_NRBW_DBG BIT(30)
#define R_S0_RXDC 0x12D4
@@ -2019,6 +3590,8 @@
#define B_TXAGC_TP GENMASK(2, 0)
#define R_TSSI_THER 0x1C10
#define B_TSSI_THER GENMASK(29, 24)
+#define R_TXAGC_BTP 0x1CA0
+#define B_TXAGC_BTP GENMASK(31, 24)
#define R_TXAGC_BB 0x1C60
#define B_TXAGC_BB_OFT GENMASK(31, 16)
#define B_TXAGC_BB GENMASK(31, 24)
@@ -2027,6 +3600,11 @@
#define B_S0_ADDCK_Q GENMASK(19, 10)
#define R_ADC_FIFO 0x20fc
#define B_ADC_FIFO_RST GENMASK(31, 24)
+#define B_ADC_FIFO_RXK GENMASK(31, 16)
+#define B_ADC_FIFO_A3 BIT(28)
+#define B_ADC_FIFO_A2 BIT(24)
+#define B_ADC_FIFO_A1 BIT(20)
+#define B_ADC_FIFO_A0 BIT(16)
#define R_TXFIR0 0x2300
#define B_TXFIR_C01 GENMASK(23, 0)
#define R_TXFIR2 0x2304
@@ -2043,16 +3621,29 @@
#define B_TXFIR_CCD GENMASK(23, 0)
#define R_TXFIRE 0x231c
#define B_TXFIR_CEF GENMASK(23, 0)
+#define R_11B_RX_V1 0x2320
+#define B_11B_RXCCA_DIS_V1 BIT(0)
+#define R_RPL_OFST 0x2340
+#define B_RPL_OFST_MASK GENMASK(14, 8)
#define R_RXCCA 0x2344
#define B_RXCCA_DIS BIT(31)
+#define R_RXCCA_V1 0x2320
+#define B_RXCCA_DIS_V1 BIT(0)
#define R_RXSC 0x237C
#define B_RXSC_EN BIT(0)
#define R_RXSCOBC 0x23B0
#define B_RXSCOBC_TH GENMASK(18, 0)
#define R_RXSCOCCK 0x23B4
#define B_RXSCOCCK_TH GENMASK(18, 0)
+#define R_P80_AT_HIGH_FREQ_RU_ALLOC 0x2410
+#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1 BIT(14)
+#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0 BIT(13)
+#define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10
+#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0)
#define R_P1_EN_SOUND_WO_NDP 0x2D7C
#define B_P1_EN_SOUND_WO_NDP BIT(1)
+#define R_S1_HW_SI_DIS 0x3200
+#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P1_DBGMOD 0x32B8
#define B_P1_DBGMOD_ON BIT(30)
#define R_S1_RXDC 0x32D4
@@ -2081,6 +3672,12 @@
#define R_CFO_TRK0 0x4404
#define R_CFO_TRK1 0x440C
#define B_CFO_TRK_MSK GENMASK(14, 10)
+#define R_T2F_GI_COMB 0x4424
+#define B_T2F_GI_COMB_EN BIT(2)
+#define R_BT_DYN_DC_EST_EN 0x441C
+#define B_BT_DYN_DC_EST_EN_MSK BIT(31)
+#define R_ASSIGN_SBD_OPT 0x4450
+#define B_ASSIGN_SBD_OPT_EN BIT(24)
#define R_DCFO_COMP_S0 0x448C
#define B_DCFO_COMP_S0_MSK GENMASK(11, 0)
#define R_DCFO_WEIGHT 0x4490
@@ -2095,6 +3692,22 @@
#define B_TXPWR_MSK GENMASK(30, 22)
#define R_TXNSS_MAP 0x45B4
#define B_TXNSS_MAP_MSK GENMASK(20, 17)
+#define R_PCOEFF0_V1 0x45BC
+#define B_PCOEFF01_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF2_V1 0x45CC
+#define B_PCOEFF23_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF4_V1 0x45D0
+#define B_PCOEFF45_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF6_V1 0x45D4
+#define B_PCOEFF67_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF8_V1 0x45D8
+#define B_PCOEFF89_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFFA_V1 0x45C0
+#define B_PCOEFFAB_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFFC_V1 0x45C4
+#define B_PCOEFFCD_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFFE_V1 0x45C8
+#define B_PCOEFFEF_MSK_V1 GENMASK(23, 0)
#define R_PATH0_IB_PKPW 0x4628
#define B_PATH0_IB_PKPW_MSK GENMASK(11, 6)
#define R_PATH0_LNA_ERR1 0x462C
@@ -2137,11 +3750,31 @@
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH0_G_LNA6_OP1DB_V1 0x4688
+#define B_PATH0_G_LNA6_OP1DB_V1 GENMASK(31, 24)
+#define R_PATH0_G_TIA0_LNA6_OP1DB_V1 0x4694
+#define B_PATH0_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
+#define R_PATH0_G_TIA1_LNA6_OP1DB_V1 0x4694
+#define B_PATH0_R_G_OFST_MASK GENMASK(23, 16)
+#define B_PATH0_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8)
+#define R_CDD_EVM_CHK_EN 0x46C0
+#define B_CDD_EVM_CHK_EN BIT(0)
+#define R_PATH0_BAND_SEL_V1 0x4738
+#define B_PATH0_BAND_SEL_MSK_V1 BIT(17)
+#define R_PATH0_BT_SHARE_V1 0x4738
+#define B_PATH0_BT_SHARE_V1 BIT(19)
+#define R_PATH0_BTG_PATH_V1 0x4738
+#define B_PATH0_BTG_PATH_V1 BIT(22)
#define R_P0_NBIIDX 0x469C
#define B_P0_NBIIDX_VAL GENMASK(11, 0)
#define B_P0_NBIIDX_NOTCH_EN BIT(12)
+#define R_P0_BACKOFF_IBADC_V1 0x469C
+#define B_P0_BACKOFF_IBADC_V1 GENMASK(31, 26)
+#define B_P0_NBIIDX_NOTCH_EN_V1 BIT(12)
#define R_P1_MODE 0x4718
#define B_P1_MODE_SEL GENMASK(31, 30)
+#define R_P0_AGC_CTL 0x4730
+#define B_P0_AGC_EN BIT(31)
#define R_PATH1_LNA_INIT 0x473C
#define B_PATH1_LNA_INIT_IDX_MSK GENMASK(26, 24)
#define R_PATH1_TIA_INIT 0x4748
@@ -2150,10 +3783,22 @@
#define B_PATH1_BTG_SHEN GENMASK(18, 17)
#define R_PATH1_RXB_INIT 0x472C
#define B_PATH1_RXB_INIT_IDX_MSK GENMASK(9, 5)
+#define R_PATH1_G_LNA6_OP1DB_V1 0x476C
+#define B_PATH1_G_LNA6_OP1DB_V1 GENMASK(31, 24)
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
+#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
+#define R_PATH1_G_TIA1_LNA6_OP1DB_V1 0x4778
+#define B_PATH1_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8)
+#define R_PATH1_BAND_SEL_V1 0x4AA4
+#define B_PATH1_BAND_SEL_MSK_V1 BIT(17)
+#define R_PATH1_BT_SHARE_V1 0x4AA4
+#define B_PATH1_BT_SHARE_V1 BIT(19)
+#define R_PATH1_BTG_PATH_V1 0x4AA4
+#define B_PATH1_BTG_PATH_V1 BIT(22)
#define R_P1_NBIIDX 0x4770
#define B_P1_NBIIDX_VAL GENMASK(11, 0)
#define B_P1_NBIIDX_NOTCH_EN BIT(12)
@@ -2165,9 +3810,56 @@
#define R_FC0_BW 0x4974
#define B_FC0_BW_INV GENMASK(6, 0)
#define B_FC0_BW_SET GENMASK(31, 30)
+#define B_ANT_RX_BT_SEG0 GENMASK(25, 22)
+#define B_ANT_RX_1RCCA_SEG1 GENMASK(21, 18)
+#define B_ANT_RX_1RCCA_SEG0 GENMASK(17, 14)
#define R_CHBW_MOD 0x4978
-#define B_CHBW_MOD_PRICH GENMASK(11, 8)
+#define B_BT_SHARE BIT(14)
#define B_CHBW_MOD_SBW GENMASK(13, 12)
+#define B_CHBW_MOD_PRICH GENMASK(11, 8)
+#define B_ANT_RX_SEG0 GENMASK(3, 0)
+#define R_P1_BACKOFF_IBADC_V1 0x49F0
+#define B_P1_BACKOFF_IBADC_V1 GENMASK(31, 26)
+#define R_BK_FC0_INV_V1 0x4A1C
+#define B_BK_FC0_INV_MSK_V1 GENMASK(18, 0)
+#define R_CCK_FC0_INV_V1 0x4A20
+#define B_CCK_FC0_INV_MSK_V1 GENMASK(18, 0)
+#define R_P1_AGC_CTL 0x4A9C
+#define B_P1_AGC_EN BIT(31)
+#define R_PATH0_RXBB_V1 0x4AD4
+#define B_PATH0_RXBB_MSK_V1 GENMASK(31, 0)
+#define R_PATH1_RXBB_V1 0x4AE0
+#define B_PATH1_RXBB_MSK_V1 GENMASK(31, 0)
+#define R_PATH0_BT_BACKOFF_V1 0x4AE4
+#define B_PATH0_BT_BACKOFF_V1 GENMASK(23, 0)
+#define R_PATH1_BT_BACKOFF_V1 0x4AEC
+#define B_PATH1_BT_BACKOFF_V1 GENMASK(23, 0)
+#define R_PATH0_FRC_FIR_TYPE_V1 0x4C00
+#define B_PATH0_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0)
+#define R_PATH0_5MDET 0x4C4C
+#define B_PATH0_5MDET_EN BIT(12)
+#define B_PATH0_5MDET_SB2 BIT(8)
+#define B_PATH0_5MDET_SB0 BIT(6)
+#define B_PATH0_5MDET_TH GENMASK(5, 0)
+#define R_PATH1_FRC_FIR_TYPE_V1 0x4CC4
+#define B_PATH1_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0)
+#define R_PATH1_5MDET 0x4D10
+#define B_PATH1_5MDET_EN BIT(12)
+#define B_PATH1_5MDET_SB2 BIT(8)
+#define B_PATH1_5MDET_SB0 BIT(6)
+#define B_PATH1_5MDET_TH GENMASK(5, 0)
+#define R_RPL_BIAS_COMP 0x4DF0
+#define B_RPL_BIAS_COMP_MASK GENMASK(7, 0)
+#define R_RPL_PATHAB 0x4E0C
+#define B_RPL_PATHB_MASK GENMASK(23, 16)
+#define B_RPL_PATHA_MASK GENMASK(15, 8)
+#define R_RSSI_M_PATHAB 0x4E2C
+#define B_RSSI_M_PATHB_MASK GENMASK(15, 8)
+#define B_RSSI_M_PATHA_MASK GENMASK(7, 0)
+#define R_FC0_V1 0x4E30
+#define B_FC0_MSK_V1 GENMASK(12, 0)
+#define R_RX_BW40_2XFFT_EN_V1 0x4E30
+#define B_RX_BW40_2XFFT_EN_MSK_V1 BIT(26)
#define R_DCFO_COMP_S0_V1 0x4A40
#define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0)
#define R_BMODE_PDTH_V1 0x4B64
@@ -2180,10 +3872,21 @@
#define B_CFO_COMP_VALID_BIT BIT(29)
#define B_CFO_COMP_WEIGHT_MSK GENMASK(27, 24)
#define B_CFO_COMP_VAL_MSK GENMASK(11, 0)
+#define R_UPD_CLK 0x5670
+#define B_DAC_VAL BIT(31)
+#define B_ACK_VAL GENMASK(30, 29)
+#define B_DPD_DIS BIT(14)
+#define B_DPD_GDIS BIT(13)
+#define B_IQK_RFC_ON BIT(1)
+#define R_TXPWRB 0x56CC
+#define B_TXPWRB_ON BIT(28)
+#define B_TXPWRB_VAL GENMASK(27, 19)
#define R_DPD_OFT_EN 0x5800
#define B_DPD_OFT_EN BIT(28)
#define R_DPD_OFT_ADDR 0x5804
#define B_DPD_OFT_ADDR GENMASK(31, 27)
+#define R_TXPWRB_H 0x580c
+#define B_TXPWRB_RDY BIT(15)
#define R_P0_TMETER 0x5810
#define B_P0_TMETER GENMASK(15, 10)
#define B_P0_TMETER_DIS BIT(16)
@@ -2197,6 +3900,16 @@
#define R_P0_RFCTM 0x5864
#define B_P0_RFCTM_VAL GENMASK(25, 20)
#define R_P0_RFCTM_RDY BIT(26)
+#define R_P0_TRSW 0x5868
+#define B_P0_TRSW_B BIT(0)
+#define B_P0_TRSW_A BIT(1)
+#define B_P0_TRSW_X BIT(2)
+#define B_P0_TRSW_SO_A2 GENMASK(7, 5)
+#define R_P0_RFM 0x5894
+#define B_P0_RFM_DIS_WL BIT(7)
+#define B_P0_RFM_TX_OPT BIT(6)
+#define B_P0_RFM_BT_EN BIT(5)
+#define B_P0_RFM_OUT GENMASK(4, 0)
#define R_P0_TXDPD 0x58D4
#define B_P0_TXDPD GENMASK(31, 28)
#define R_P0_TXPW_RSTB 0x58DC
@@ -2226,6 +3939,8 @@
#define B_S0_DACKQ7_K GENMASK(15, 8)
#define R_S0_DACKQ8 0x5E98
#define B_S0_DACKQ8_K GENMASK(15, 8)
+#define R_RPL_BIAS_COMP1 0x6DF0
+#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
#define R_P1_TMETER 0x7810
#define B_P1_TMETER GENMASK(15, 10)
#define B_P1_TMETER_DIS BIT(16)
@@ -2278,20 +3993,28 @@
#define R_IQK_DIF2 0x8024
#define B_IQK_DIF2_RXPI GENMASK(19, 0)
#define R_IQK_DIF4 0x802C
-#define B_IQK_DIF4_TXT GENMASK(11, 0)
#define B_IQK_DIF4_RXT GENMASK(27, 16)
+#define B_IQK_DIF4_TXT GENMASK(11, 0)
+#define IQK_DF4_TXT_8_25MHZ 0x021
#define R_IQK_CFG 0x8034
#define B_IQK_CFG_SET GENMASK(5, 4)
+#define R_TPG_SEL 0x8068
#define R_TPG_MOD 0x806C
#define B_TPG_MOD_F GENMASK(2, 1)
#define R_MDPK_SYNC 0x8070
#define B_MDPK_SYNC_SEL BIT(31)
#define B_MDPK_SYNC_MAN GENMASK(31, 28)
#define R_MDPK_RX_DCK 0x8074
+#define B_MDPK_RX_DCK_EN BIT(31)
+#define R_KIP_MOD 0x8078
+#define B_KIP_MOD GENMASK(19, 0)
#define R_NCTL_RW 0x8080
#define R_KIP_SYSCFG 0x8088
#define R_KIP_CLK 0x808C
+#define R_DPK_IDL 0x809C
+#define B_DPK_IDL BIT(8)
#define R_LDL_NORM 0x80A0
+#define B_LDL_NORM_MA BIT(16)
#define B_LDL_NORM_PN GENMASK(12, 8)
#define B_LDL_NORM_OP GENMASK(1, 0)
#define R_DPK_CTL 0x80B0
@@ -2302,12 +4025,19 @@
#define B_DPK_CFG2_ST BIT(14)
#define R_DPK_CFG3 0x80C0
#define R_KPATH_CFG 0x80D0
+#define B_KPATH_CFG_ED GENMASK(21, 20)
#define R_KIP_RPT1 0x80D4
#define B_KIP_RPT1_SEL GENMASK(21, 16)
#define R_SRAM_IQRX 0x80D8
#define R_GAPK 0x80E0
#define B_GAPK_ADR BIT(0)
#define R_SRAM_IQRX2 0x80E8
+#define R_DPK_MPA 0x80EC
+#define B_DPK_MPA_T0 BIT(10)
+#define B_DPK_MPA_T1 BIT(9)
+#define B_DPK_MPA_T2 BIT(8)
+#define R_DPK_WR 0x80F4
+#define B_DPK_WR_ST BIT(29)
#define R_DPK_TRK 0x80f0
#define B_DPK_TRK_DIS BIT(31)
#define R_RPT_COM 0x80FC
@@ -2315,8 +4045,11 @@
#define B_PRT_COM_DCI GENMASK(27, 16)
#define B_PRT_COM_CORV GENMASK(15, 8)
#define B_PRT_COM_DCQ GENMASK(11, 0)
+#define B_PRT_COM_RXOV BIT(8)
#define B_PRT_COM_GL GENMASK(7, 4)
#define B_PRT_COM_CORI GENMASK(7, 0)
+#define B_PRT_COM_RXBB GENMASK(5, 0)
+#define B_PRT_COM_DONE BIT(0)
#define R_COEF_SEL 0x8104
#define B_COEF_SEL_IQC BIT(0)
#define B_COEF_SEL_MDPD BIT(8)
@@ -2341,17 +4074,27 @@
#define R_CFIR_MAP 0x8150
#define R_CFIR_LUT 0x8154
#define B_CFIR_LUT_SEL BIT(8)
+#define B_CFIR_LUT_SET BIT(4)
#define B_CFIR_LUT_G3 BIT(3)
#define B_CFIR_LUT_G2 BIT(2)
+#define B_CFIR_LUT_GP_V1 GENMASK(2, 0)
#define B_CFIR_LUT_GP GENMASK(1, 0)
+#define R_DPK_GN 0x819C
+#define B_DPK_GN_EN GENMASK(17, 16)
+#define B_DPK_GN_AG GENMASK(9, 0)
#define R_DPD_V1 0x81a0
+#define B_DPD_LBK BIT(7)
#define R_DPD_CH0 0x81AC
#define R_DPD_BND 0x81B4
#define R_DPD_CH0A 0x81BC
+#define B_DPD_MEN GENMASK(31, 28)
+#define B_DPD_ORDER GENMASK(26, 24)
+#define B_DPD_SEL GENMASK(13, 8)
#define R_TXAGC_RFK 0x81C4
#define B_TXAGC_RFK_CH0 GENMASK(5, 0)
#define R_DPD_COM 0x81C8
#define R_KIP_IQP 0x81CC
+#define B_KIP_IQP_SW GENMASK(13, 12)
#define B_KIP_IQP_IQSW GENMASK(5, 0)
#define R_KIP_RPT 0x81D4
#define B_KIP_RPT_SEL GENMASK(21, 16)
@@ -2359,8 +4102,15 @@
#define R_LOAD_COEF 0x81DC
#define B_LOAD_COEF_MDPD BIT(16)
#define B_LOAD_COEF_CFIR GENMASK(1, 0)
+#define B_LOAD_COEF_DI BIT(1)
#define B_LOAD_COEF_AUTO BIT(0)
+#define R_DPK_GL 0x81F0
+#define B_DPK_GL_A0 GENMASK(31, 28)
+#define B_DPK_GL_A1 GENMASK(17, 0)
#define R_RPT_PER 0x81FC
+#define B_RPT_PER_TSSI GENMASK(28, 16)
+#define B_RPT_PER_OF GENMASK(15, 8)
+#define B_RPT_PER_TH GENMASK(5, 0)
#define R_RXCFIR_P0C0 0x8D40
#define R_RXCFIR_P0C1 0x8D84
#define R_RXCFIR_P0C2 0x8DC8
@@ -2393,5 +4143,112 @@
#define R_IQKINF2 0x9FE8
#define B_IQKINF2_FCNT GENMASK(23, 16)
#define B_IQKINF2_KCNT GENMASK(15, 8)
-#define B_IQKINF2_NCTLV GENMAKS(7, 0)
+#define B_IQKINF2_NCTLV GENMASK(7, 0)
+#define R_DCOF0 0xC000
+#define B_DCOF0_V GENMASK(4, 1)
+#define R_DCOF1 0xC004
+#define B_DCOF1_S BIT(0)
+#define R_DCOF8 0xC020
+#define B_DCOF8_V GENMASK(4, 1)
+#define R_DACK_S0P0 0xC040
+#define B_DACK_S0P0_OK BIT(31)
+#define R_DACK_BIAS00 0xc048
+#define B_DACK_BIAS00 GENMASK(11, 2)
+#define R_DACK_S0P2 0xC05C
+#define B_DACK_S0M0 GENMASK(31, 24)
+#define B_DACK_S0P2_OK BIT(2)
+#define R_DACK_DADCK00 0xC060
+#define B_DACK_DADCK00 GENMASK(31, 24)
+#define R_DACK_S0P1 0xC064
+#define B_DACK_S0P1_OK BIT(31)
+#define R_DACK_BIAS01 0xC06C
+#define B_DACK_BIAS01 GENMASK(11, 2)
+#define R_DACK_S0P3 0xC080
+#define B_DACK_S0M1 GENMASK(31, 24)
+#define B_DACK_S0P3_OK BIT(2)
+#define R_DACK_DADCK01 0xC084
+#define B_DACK_DADCK01 GENMASK(31, 24)
+#define R_DRCK 0xC0C4
+#define B_DRCK_IDLE BIT(9)
+#define B_DRCK_EN BIT(6)
+#define B_DRCK_VAL GENMASK(4, 0)
+#define R_DRCK_RES 0xC0C8
+#define B_DRCK_RES GENMASK(19, 15)
+#define B_DRCK_POL BIT(3)
+#define R_PATH0_SAMPL_DLY_T_V1 0xC0D4
+#define B_PATH0_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26)
+#define R_P0_CFCH_BW0 0xC0D4
+#define B_P0_CFCH_BW0 GENMASK(27, 26)
+#define R_P0_CFCH_BW1 0xC0D8
+#define B_P0_CFCH_BW1 GENMASK(8, 5)
+#define R_ADDCK0 0xC0F4
+#define B_ADDCK0 GENMASK(9, 8)
+#define B_ADDCK0_EN BIT(4)
+#define B_ADDCK0_RST BIT(2)
+#define R_ADDCK0_RL 0xC0F8
+#define B_ADDCK0_RLS GENMASK(29, 28)
+#define B_ADDCK0_RL1 GENMASK(27, 18)
+#define B_ADDCK0_RL0 GENMASK(17, 8)
+#define R_ADDCKR0 0xC0FC
+#define B_ADDCKR0_A0 GENMASK(19, 10)
+#define B_ADDCKR0_A1 GENMASK(9, 0)
+#define R_DACK10 0xC100
+#define B_DACK10 GENMASK(4, 1)
+#define R_DACK1_K 0xc104
+#define B_DACK1_EN BIT(0)
+#define R_DACK11 0xC120
+#define B_DACK11 GENMASK(4, 1)
+#define R_DACK_S1P0 0xC140
+#define B_DACK_S1P0_OK BIT(31)
+#define R_DACK_BIAS10 0xC148
+#define B_DACK_BIAS10 GENMASK(11, 2)
+#define R_DACK10S 0xC15C
+#define B_DACK10S GENMASK(31, 24)
+#define R_DACK_S1P2 0xC15C
+#define B_DACK_S1P2_OK BIT(2)
+#define R_DACK_DADCK10 0xC160
+#define B_DACK_DADCK10 GENMASK(31, 24)
+#define R_DACK_S1P1 0xC164
+#define B_DACK_S1P1_OK BIT(31)
+#define R_DACK_BIAS11 0xC16C
+#define B_DACK_BIAS11 GENMASK(11, 2)
+#define R_DACK11S 0xC180
+#define B_DACK11S GENMASK(31, 24)
+#define R_DACK_S1P3 0xC180
+#define B_DACK_S1P3_OK BIT(2)
+#define R_DACK_DADCK11 0xC184
+#define B_DACK_DADCK11 GENMASK(31, 24)
+#define R_PATH1_SAMPL_DLY_T_V1 0xC1D4
+#define B_PATH1_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26)
+#define R_PATH0_BW_SEL_V1 0xC0D8
+#define B_PATH0_BW_SEL_MSK_V1 GENMASK(8, 5)
+#define R_PATH1_BW_SEL_V1 0xC1D8
+#define B_PATH1_BW_SEL_MSK_V1 GENMASK(8, 5)
+#define R_ADDCK1 0xC1F4
+#define B_ADDCK1 GENMASK(9, 8)
+#define B_ADDCK1_EN BIT(4)
+#define B_ADDCK1_RST BIT(2)
+#define R_ADDCK1_RL 0xC1F8
+#define B_ADDCK1_RLS GENMASK(29, 28)
+#define B_ADDCK1_RL1 GENMASK(27, 18)
+#define B_ADDCK1_RL0 GENMASK(17, 8)
+#define R_ADDCKR1 0xC1fC
+#define B_ADDCKR1_A0 GENMASK(19, 10)
+#define B_ADDCKR1_A1 GENMASK(9, 0)
+
+/* WiFi CPU local domain */
+#define R_AX_WDT_CTRL 0x0040
+#define B_AX_WDT_EN BIT(31)
+#define B_AX_WDT_OPT_RESET_PLATFORM_EN BIT(29)
+#define B_AX_IO_HANG_IMR BIT(27)
+#define B_AX_IO_HANG_CMAC_RDATA_EN BIT(26)
+#define B_AX_IO_HANG_DMAC_EN BIT(25)
+#define B_AX_WDT_CLR BIT(16)
+#define B_AX_WDT_COUNT_MASK GENMASK(15, 0)
+#define WDT_CTRL_ALL_DIS 0
+
+#define R_AX_WDT_STATUS 0x0044
+#define B_AX_FS_WDT_INT BIT(8)
+#define B_AX_FS_WDT_INT_MSK BIT(0)
+
#endif
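A minimal usage sketch (not part of the patch): the GENMASK()/BIT() fields declared in this header are consumed with the kernel's bitfield helpers. Assuming generic 32-bit MMIO accessors named rtw89_read32()/rtw89_write32() purely for illustration, programming a field such as B_AX_SIFS_MACTXEN_T1_MASK with its SIFS_MACTXEN_T1 default would look roughly like this:

#include <linux/bitfield.h>

/* Sketch only: rtw89_read32()/rtw89_write32() stand in for the driver's
 * register accessors; the register, mask and value names come from the
 * header above.
 */
static void example_prog_sifs_mactxen(struct rtw89_dev *rtwdev)
{
	u32 val = rtw89_read32(rtwdev, R_AX_PREBKF_CFG_1);

	val &= ~B_AX_SIFS_MACTXEN_T1_MASK;	/* clear the 7-bit field */
	val |= FIELD_PREP(B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1);
	rtw89_write32(rtwdev, R_AX_PREBKF_CFG_1, val);
}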
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 4c37e590e43c..20c7afd3e70f 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -5,254 +5,253 @@
#include "debug.h"
#include "ps.h"
-#define COUNTRY_REGD(_alpha2, _txpwr_regd_2g, _txpwr_regd_5g) \
+#define COUNTRY_REGD(_alpha2, _txpwr_regd...) \
{.alpha2 = (_alpha2), \
- .txpwr_regd[RTW89_BAND_2G] = (_txpwr_regd_2g), \
- .txpwr_regd[RTW89_BAND_5G] = (_txpwr_regd_5g) \
+ .txpwr_regd = {_txpwr_regd}, \
}
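/*
 * Illustrative expansion (sketch, not part of the patch): with the variadic
 * form of COUNTRY_REGD() above, an entry such as
 *     COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC)
 * initializes roughly to
 *     { .alpha2 = "US", .txpwr_regd = { RTW89_FCC, RTW89_FCC } },
 * i.e. the listed domains fill txpwr_regd[] in band order (assuming 2 GHz and
 * 5 GHz are the first two band indices), so a further per-band domain can be
 * appended later without reshaping every table entry.
 */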
static const struct rtw89_regulatory rtw89_ww_regd =
COUNTRY_REGD("00", RTW89_WW, RTW89_WW);
static const struct rtw89_regulatory rtw89_regd_map[] = {
- COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO),
- COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE),
- COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO),
- COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GB", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR),
- COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE),
- COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CN", RTW89_CN, RTW89_CN),
- COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC),
- COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CA", RTW89_IC, RTW89_IC),
- COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK),
- COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC),
- COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE),
+ COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA),
+ COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK),
+ COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR),
+ COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE),
+ COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN),
+ COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC),
+ COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC),
+ COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_NA),
+ COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC, RTW89_NA),
+ COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA),
+ COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
};
static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2)
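
Note: the table above replaces every two-argument COUNTRY_REGD() entry with a three-argument form, so each country now carries a separate regulatory domain per band (2 GHz, 5 GHz, and the newly added 6 GHz), with RTW89_NA apparently standing in where no 6 GHz domain applies; "GB" also moves from RTW89_ETSI to the new RTW89_UK domain. As a rough, self-contained sketch of how a three-argument entry macro can populate such a per-band array — the struct, enum and macro below are illustrative assumptions, not the driver's actual definitions:

/* Illustrative sketch only: hypothetical types mirroring the idea of a
 * per-band regulatory table; not the rtw89 driver's real definitions. */
#include <stdio.h>

enum band { BAND_2G, BAND_5G, BAND_6G, BAND_NUM };
enum regd { REGD_NA, REGD_FCC, REGD_ETSI, REGD_UK };

struct country_regd {
        char alpha2[3];
        enum regd txpwr_regd[BAND_NUM];
};

/* A three-argument entry fills one regulatory domain per band. */
#define COUNTRY_REGD(_alpha2, _2g, _5g, _6g) \
        { .alpha2 = _alpha2, .txpwr_regd = { _2g, _5g, _6g } }

static const struct country_regd regd_map[] = {
        COUNTRY_REGD("US", REGD_FCC, REGD_FCC, REGD_FCC),
        COUNTRY_REGD("GB", REGD_UK,  REGD_UK,  REGD_UK),
        COUNTRY_REGD("TW", REGD_FCC, REGD_FCC, REGD_NA), /* no 6 GHz domain */
};

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(regd_map) / sizeof(regd_map[0]); i++)
                printf("%c%c: 2g=%d 5g=%d 6g=%d\n",
                       regd_map[i].alpha2[0], regd_map[i].alpha2[1],
                       regd_map[i].txpwr_regd[BAND_2G],
                       regd_map[i].txpwr_regd[BAND_5G],
                       regd_map[i].txpwr_regd[BAND_6G]);
        return 0;
}

The rtw89_debug_regd() macro added in the next hunk prints exactly this kind of per-band mapping, replacing the three hand-rolled 2g/5g-only debug messages that the later hunks delete.
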
@@ -272,6 +271,17 @@ static bool rtw89_regd_is_ww(const struct rtw89_regulatory *regd)
return regd == &rtw89_ww_regd;
}
+#define rtw89_debug_regd(_dev, _regd, _desc, _argv...) \
+do { \
+ typeof(_regd) __r = _regd; \
+ rtw89_debug(_dev, RTW89_DBG_REGD, _desc \
+ ": %c%c: mapping txregd to {2g: %d, 5g: %d, 6g: %d}\n", \
+ ##_argv, __r->alpha2[0], __r->alpha2[1], \
+ __r->txpwr_regd[RTW89_BAND_2G], \
+ __r->txpwr_regd[RTW89_BAND_5G], \
+ __r->txpwr_regd[RTW89_BAND_6G]); \
+} while (0)
+
int rtw89_regd_init(struct rtw89_dev *rtwdev,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
@@ -294,20 +304,12 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev,
if (ret)
rtw89_warn(rtwdev, "failed to hint regulatory:%d\n", ret);
- rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "efuse country code %c%c, mapping to 2g txregd %d, 5g txregd %d\n",
- rtwdev->efuse.country_code[0], rtwdev->efuse.country_code[1],
- rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
- rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
-
+ rtw89_debug_regd(rtwdev, chip_regd, "efuse country code");
return 0;
}
- rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "worldwide roaming chip, follow the setting of stack(%c%c), mapping to 2g txregd %d, 5g txregd %d\n",
- rtwdev->regd->alpha2[0], rtwdev->regd->alpha2[1],
- rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
- rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
+ rtw89_debug_regd(rtwdev, rtwdev->regd,
+ "worldwide roaming chip, follow the setting of stack");
return 0;
}
@@ -341,11 +343,8 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
goto exit;
}
rtw89_regd_notifier_apply(rtwdev, wiphy, request);
- rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "get alpha2 %c%c from initiator %d, mapping to 2g txregd %d, 5g txregd %d\n",
- request->alpha2[0], request->alpha2[1], request->initiator,
- rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
- rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
+ rtw89_debug_regd(rtwdev, rtwdev->regd, "get from initiator %d, alpha2",
+ request->initiator);
rtw89_chip_set_txpwr(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 41fc8db311ec..81bd0c4fe21b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -37,19 +37,21 @@ static const struct rtw89_hfc_pub_cfg rtw8852a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie,
- &rtw89_hfc_preccfg_pcie, RTW89_HCIFC_POH},
- [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_hfc_preccfg_pcie,
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
};
static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = {
- [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size0, &rtw89_ple_size0,
- &rtw89_wde_qt0, &rtw89_wde_qt0, &rtw89_ple_qt4,
- &rtw89_ple_qt5},
- [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size4, &rtw89_ple_size4,
- &rtw89_wde_qt4, &rtw89_wde_qt4, &rtw89_ple_qt13,
- &rtw89_ple_qt13},
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size0,
+ &rtw89_mac_size.ple_size0, &rtw89_mac_size.wde_qt0,
+ &rtw89_mac_size.wde_qt0, &rtw89_mac_size.ple_qt4,
+ &rtw89_mac_size.ple_qt5},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4,
+ &rtw89_mac_size.ple_size4, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
[RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
NULL},
};
@@ -406,6 +408,51 @@ static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
};
+static const struct rtw89_imr_info rtw8852a_imr_info = {
+ .wdrls_imr_set = B_AX_WDRLS_IMR_SET,
+ .wsec_imr_reg = R_AX_SEC_DEBUG,
+ .wsec_imr_set = B_AX_IMR_ERROR,
+ .mpdu_tx_imr_set = 0,
+ .mpdu_rx_imr_set = 0,
+ .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET,
+ .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_ERR_IMR_ISR,
+ .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR,
+ .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET,
+ .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
+ .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR,
+ .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET,
+ .wde_imr_clr = B_AX_WDE_IMR_CLR,
+ .wde_imr_set = B_AX_WDE_IMR_SET,
+ .ple_imr_clr = B_AX_PLE_IMR_CLR,
+ .ple_imr_set = B_AX_PLE_IMR_SET,
+ .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR,
+ .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET,
+ .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR,
+ .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET,
+ .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR,
+ .other_disp_imr_set = 0,
+ .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR,
+ .bbrpt_err_imr_set = 0,
+ .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR,
+ .ptcl_imr_clr = B_AX_PTCL_IMR_CLR,
+ .ptcl_imr_set = B_AX_PTCL_IMR_SET,
+ .cdma_imr_0_reg = R_AX_DLE_CTRL,
+ .cdma_imr_0_clr = B_AX_DLE_IMR_CLR,
+ .cdma_imr_0_set = B_AX_DLE_IMR_SET,
+ .cdma_imr_1_reg = 0,
+ .cdma_imr_1_clr = 0,
+ .cdma_imr_1_set = 0,
+ .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR,
+ .phy_intf_imr_clr = 0,
+ .phy_intf_imr_set = 0,
+ .rmac_imr_reg = R_AX_RMAC_ERR_ISR,
+ .rmac_imr_clr = B_AX_RMAC_IMR_CLR,
+ .rmac_imr_set = B_AX_RMAC_IMR_SET,
+ .tmac_imr_reg = R_AX_TMAC_ERR_IMR_ISR,
+ .tmac_imr_clr = B_AX_TMAC_IMR_CLR,
+ .tmac_imr_set = B_AX_TMAC_IMR_SET,
+};
+
static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
struct rtw8852a_efuse *map)
{
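
Note: the new rtw8852a_imr_info block gathers the 8852A-specific interrupt-mask registers and their set/clear bits into a single per-chip table, which lets band-common MAC code program IMRs by walking a chip description instead of hard-coding 8852A register names. A minimal sketch of that data-driven pattern — the register values and the write helper are invented stand-ins, not rtw89's real accessors:

/* Sketch of data-driven IMR programming; registers, masks and the write
 * helper below are invented stand-ins, not rtw89's actual definitions. */
#include <stdio.h>

struct imr_info {
        unsigned int rmac_imr_reg, rmac_imr_clr, rmac_imr_set;
        unsigned int tmac_imr_reg, tmac_imr_clr, tmac_imr_set;
};

/* Stand-in for a register update; a real driver would touch hardware here. */
static void reg_update(unsigned int reg, unsigned int clr, unsigned int set)
{
        printf("reg 0x%04x: clear 0x%08x, set 0x%08x\n", reg, clr, set);
}

/* Common code only sees the table, so a new chip just supplies new values. */
static void enable_imrs(const struct imr_info *imr)
{
        reg_update(imr->rmac_imr_reg, imr->rmac_imr_clr, imr->rmac_imr_set);
        reg_update(imr->tmac_imr_reg, imr->tmac_imr_clr, imr->tmac_imr_set);
}

static const struct imr_info chip_a_imr = {
        .rmac_imr_reg = 0xc614, .rmac_imr_clr = 0x0ff0, .rmac_imr_set = 0x0f00,
        .tmac_imr_reg = 0xc618, .tmac_imr_clr = 0x00ff, .tmac_imr_set = 0x00f0,
};

int main(void)
{
        enable_imrs(&chip_a_imr);
        return 0;
}
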
@@ -1841,7 +1888,8 @@ rtw8852a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
u32 _cur, _wrt; \
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \
"btc ctrl %s: 0x%x\n", #_case, _val); \
- rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur);\
+ if (rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur))\
+ break; \
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \
"btc ctrl ori 0x%x: 0x%x\n", _reg, _cur); \
_wrt = __do_clr(_val) ? \
@@ -1994,6 +2042,8 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
}
static const struct rtw89_chip_ops rtw8852a_chip_ops = {
+ .enable_bb_rf = rtw89_mac_enable_bb_rf,
+ .disable_bb_rf = rtw89_mac_disable_bb_rf,
.bb_reset = rtw8852a_bb_reset,
.bb_sethw = rtw8852a_bb_sethw,
.read_rf = rtw89_phy_read_rf,
@@ -2016,13 +2066,17 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.ctrl_btg = rtw8852a_ctrl_btg,
.query_ppdu = rtw8852a_query_ppdu,
.bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc,
+ .cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
.pwr_on_func = NULL,
.pwr_off_func = NULL,
+ .fill_txdesc = rtw89_core_fill_txdesc,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
+ .h2c_dctl_sec_cam = NULL,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2041,12 +2095,14 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.fifo_size = 458752,
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
+ .rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = rtw8852a_hfc_param_ini_pcie,
.dle_mem = rtw8852a_dle_mem_pcie,
.rf_base_addr = {0xc000, 0xd000},
.pwr_on_seq = pwr_on_seq_8852a,
.pwr_off_seq = pwr_off_seq_8852a,
.bb_table = &rtw89_8852a_phy_bb_table,
+ .bb_gain_table = NULL,
.rf_table = {&rtw89_8852a_phy_radioa_table,
&rtw89_8852a_phy_radiob_table,},
.nctl_table = &rtw89_8852a_phy_nctl_table,
@@ -2058,9 +2114,11 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
+ .tssi_dbw_table = NULL,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bw160 = false,
+ .hw_sec_hdr = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -2093,7 +2151,11 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
+ .low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD,
.hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_desc_size = sizeof(struct rtw89_txwd_body),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body),
.h2c_ctrl_reg = R_AX_H2CREG_CTRL,
.h2c_regs = rtw8852a_h2c_regs,
.c2h_ctrl_reg = R_AX_C2HREG_CTRL,
@@ -2101,6 +2163,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.page_regs = &rtw8852a_page_regs,
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 3,
+ .imr_info = &rtw8852a_imr_info
};
EXPORT_SYMBOL(rtw8852a_chip_info);
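
Note: the rtw89_chip_ops and rtw89_chip_info additions above (enable_bb_rf, fill_txdesc, cfg_txrx_path, h2c_cctl_func_id, txwd_body_size and friends) make the 8852A state explicitly what earlier common code assumed implicitly, which is the usual preparation for dispatching through the ops table once a second chip generation supplies different callbacks. A compact, hypothetical sketch of that dispatch style, including the NULL-hook convention used for optional operations — the names below are illustrative, not the driver's real structures:

/* Hypothetical sketch of per-chip ops dispatch; not rtw89's real layout. */
#include <stdio.h>

struct device;                          /* opaque device handle for the sketch */

struct chip_ops {
        void (*fill_txdesc)(struct device *dev, const char *what);
        void (*cfg_txrx_path)(struct device *dev);   /* optional, may be NULL */
};

struct chip_info {
        const char *name;
        const struct chip_ops *ops;
};

static void generic_fill_txdesc(struct device *dev, const char *what)
{
        (void)dev;
        printf("filling txdesc for %s\n", what);
}

static const struct chip_ops chip_a_ops = {
        .fill_txdesc = generic_fill_txdesc,
        .cfg_txrx_path = NULL,          /* chip A has nothing to configure */
};

static const struct chip_info chip_a = { .name = "chip-a", .ops = &chip_a_ops };

static void core_tx(struct device *dev, const struct chip_info *chip)
{
        chip->ops->fill_txdesc(dev, chip->name);
        if (chip->ops->cfg_txrx_path)   /* common code checks optional hooks */
                chip->ops->cfg_txrx_path(dev);
}

int main(void)
{
        core_tx(NULL, &chip_a);
        return 0;
}
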
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index ad272854c442..e3c2fce32651 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -2189,8 +2189,8 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
"[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
corr_val);
- dpk->corr_idx[path] = corr_idx;
- dpk->corr_val[path] = corr_val;
+ dpk->corr_idx[path][0] = corr_idx;
+ dpk->corr_val[path][0] = corr_val;
rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
@@ -2203,8 +2203,8 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
path, dc_i, dc_q);
- dpk->dc_i[path] = dc_i;
- dpk->dc_q[path] = dc_q;
+ dpk->dc_i[path][0] = dc_i;
+ dpk->dc_q[path][0] = dc_q;
if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
corr_val < DPK_SYNC_TH_CORR)
@@ -2907,10 +2907,10 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 ch = rtwdev->hal.current_channel;
u8 subband = rtwdev->hal.current_subband;
- const u8 *thm_up_a = NULL;
- const u8 *thm_down_a = NULL;
- const u8 *thm_up_b = NULL;
- const u8 *thm_down_b = NULL;
+ const s8 *thm_up_a = NULL;
+ const s8 *thm_down_a = NULL;
+ const s8 *thm_up_b = NULL;
+ const s8 *thm_down_b = NULL;
u8 thermal = 0xff;
s8 thm_ofst[64] = {0};
u32 tmp = 0;
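
Note: two small correctness changes appear in rtw8852a_rfk.c above: the DPK correlation/DC results gain a second array index alongside the RF path, and the thermal compensation tables are now read through s8 pointers, matching the tables themselves becoming signed, so negative swing deltas are not misread as large unsigned values. A toy example of why the signedness matters when applying a delta table — all values here are made up:

/* Toy illustration of signed vs. unsigned delta tables; values invented. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A "power down" delta of -3 stored in a table of deltas. */
        const int8_t  signed_delta[]   = { 0, -1, -3 };
        const uint8_t unsigned_delta[] = { 0, 0xff, 0xfd };   /* same bits */

        int base_idx = 10;

        /* Signed arithmetic yields the intended compensated index (7)... */
        printf("signed:   %d\n", base_idx + signed_delta[2]);
        /* ...while reading the same bytes as u8 jumps to 263. */
        printf("unsigned: %d\n", base_idx + unsigned_delta[2]);
        return 0;
}
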
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
index 253b5f8fc4f9..99479bbb0939 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
@@ -43313,7 +43313,7 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8852a_txpwr_byrate[] = {
{ 1, 0, 4, 0, 4, 0x00000000, },
};
-static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43322,7 +43322,7 @@ static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6,
6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43331,7 +43331,7 @@ static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43340,7 +43340,7 @@ static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6,
6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43349,35 +43349,35 @@ static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_2gb_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2gb_n[] = {
0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4,
4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7};
-static const u8 _txpwr_track_delta_swingidx_2gb_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2gb_p[] = {
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
-static const u8 _txpwr_track_delta_swingidx_2ga_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2ga_n[] = {
0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5};
-static const u8 _txpwr_track_delta_swingidx_2ga_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2ga_p[] = {
0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4,
4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10};
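
Note: the remaining hunks in rtw8852a_table.c flip the swing-index delta tables to s8 for the same signedness reason as above, and then extend every per-channel transmit-power-limit table with an RTW89_UK column for the new UK regulatory domain; in the hunks shown the UK values mirror the ETSI ones, and 127 appears to keep its role as the "channel not permitted" sentinel. A small, hedged sketch of how such a limit table is indexed by regulatory domain and channel — the dimensions, values and the 0.25 dBm unit are assumptions for illustration only:

/* Invented miniature of a [regd][channel] power-limit table; 127 marks a
 * channel as not permitted. Not the driver's real table or dimensions. */
#include <stdio.h>

enum { REGD_FCC, REGD_ETSI, REGD_UK, REGD_NUM };
#define CH_NUM 3

static const signed char txpwr_lmt[REGD_NUM][CH_NUM] = {
        [REGD_FCC]  = { 76, 76, 127 },  /* last channel blocked under FCC */
        [REGD_ETSI] = { 56, 56, 56 },
        [REGD_UK]   = { 56, 56, 56 },   /* UK column mirrors ETSI here */
};

int main(void)
{
        int regd = REGD_UK, ch = 1;

        printf("limit for regd %d, ch %d: %d (0.25 dBm units assumed)\n",
               regd, ch, txpwr_lmt[regd][ch]);
        return 0;
}
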
@@ -43563,6 +43563,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][0] = 76,
[0][0][0][0][RTW89_CN][0] = 56,
[0][0][0][0][RTW89_QATAR][0] = 56,
+ [0][0][0][0][RTW89_UK][0] = 56,
[0][0][0][0][RTW89_FCC][1] = 76,
[0][0][0][0][RTW89_ETSI][1] = 56,
[0][0][0][0][RTW89_MKK][1] = 68,
@@ -43574,6 +43575,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][1] = 76,
[0][0][0][0][RTW89_CN][1] = 56,
[0][0][0][0][RTW89_QATAR][1] = 56,
+ [0][0][0][0][RTW89_UK][1] = 56,
[0][0][0][0][RTW89_FCC][2] = 76,
[0][0][0][0][RTW89_ETSI][2] = 56,
[0][0][0][0][RTW89_MKK][2] = 68,
@@ -43585,6 +43587,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][2] = 76,
[0][0][0][0][RTW89_CN][2] = 56,
[0][0][0][0][RTW89_QATAR][2] = 56,
+ [0][0][0][0][RTW89_UK][2] = 56,
[0][0][0][0][RTW89_FCC][3] = 76,
[0][0][0][0][RTW89_ETSI][3] = 56,
[0][0][0][0][RTW89_MKK][3] = 68,
@@ -43596,6 +43599,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][3] = 76,
[0][0][0][0][RTW89_CN][3] = 56,
[0][0][0][0][RTW89_QATAR][3] = 56,
+ [0][0][0][0][RTW89_UK][3] = 56,
[0][0][0][0][RTW89_FCC][4] = 76,
[0][0][0][0][RTW89_ETSI][4] = 56,
[0][0][0][0][RTW89_MKK][4] = 68,
@@ -43607,6 +43611,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][4] = 76,
[0][0][0][0][RTW89_CN][4] = 56,
[0][0][0][0][RTW89_QATAR][4] = 56,
+ [0][0][0][0][RTW89_UK][4] = 56,
[0][0][0][0][RTW89_FCC][5] = 76,
[0][0][0][0][RTW89_ETSI][5] = 56,
[0][0][0][0][RTW89_MKK][5] = 68,
@@ -43618,6 +43623,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][5] = 76,
[0][0][0][0][RTW89_CN][5] = 56,
[0][0][0][0][RTW89_QATAR][5] = 56,
+ [0][0][0][0][RTW89_UK][5] = 56,
[0][0][0][0][RTW89_FCC][6] = 76,
[0][0][0][0][RTW89_ETSI][6] = 56,
[0][0][0][0][RTW89_MKK][6] = 68,
@@ -43629,6 +43635,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][6] = 76,
[0][0][0][0][RTW89_CN][6] = 56,
[0][0][0][0][RTW89_QATAR][6] = 56,
+ [0][0][0][0][RTW89_UK][6] = 56,
[0][0][0][0][RTW89_FCC][7] = 76,
[0][0][0][0][RTW89_ETSI][7] = 56,
[0][0][0][0][RTW89_MKK][7] = 68,
@@ -43640,6 +43647,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][7] = 76,
[0][0][0][0][RTW89_CN][7] = 56,
[0][0][0][0][RTW89_QATAR][7] = 56,
+ [0][0][0][0][RTW89_UK][7] = 56,
[0][0][0][0][RTW89_FCC][8] = 76,
[0][0][0][0][RTW89_ETSI][8] = 56,
[0][0][0][0][RTW89_MKK][8] = 68,
@@ -43651,6 +43659,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][8] = 76,
[0][0][0][0][RTW89_CN][8] = 56,
[0][0][0][0][RTW89_QATAR][8] = 56,
+ [0][0][0][0][RTW89_UK][8] = 56,
[0][0][0][0][RTW89_FCC][9] = 76,
[0][0][0][0][RTW89_ETSI][9] = 56,
[0][0][0][0][RTW89_MKK][9] = 68,
@@ -43662,6 +43671,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][9] = 76,
[0][0][0][0][RTW89_CN][9] = 56,
[0][0][0][0][RTW89_QATAR][9] = 56,
+ [0][0][0][0][RTW89_UK][9] = 56,
[0][0][0][0][RTW89_FCC][10] = 76,
[0][0][0][0][RTW89_ETSI][10] = 56,
[0][0][0][0][RTW89_MKK][10] = 68,
@@ -43673,6 +43683,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][10] = 76,
[0][0][0][0][RTW89_CN][10] = 56,
[0][0][0][0][RTW89_QATAR][10] = 56,
+ [0][0][0][0][RTW89_UK][10] = 56,
[0][0][0][0][RTW89_FCC][11] = 68,
[0][0][0][0][RTW89_ETSI][11] = 56,
[0][0][0][0][RTW89_MKK][11] = 68,
@@ -43684,6 +43695,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][11] = 68,
[0][0][0][0][RTW89_CN][11] = 56,
[0][0][0][0][RTW89_QATAR][11] = 56,
+ [0][0][0][0][RTW89_UK][11] = 56,
[0][0][0][0][RTW89_FCC][12] = 48,
[0][0][0][0][RTW89_ETSI][12] = 56,
[0][0][0][0][RTW89_MKK][12] = 68,
@@ -43695,6 +43707,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][12] = 48,
[0][0][0][0][RTW89_CN][12] = 56,
[0][0][0][0][RTW89_QATAR][12] = 56,
+ [0][0][0][0][RTW89_UK][12] = 56,
[0][0][0][0][RTW89_FCC][13] = 127,
[0][0][0][0][RTW89_ETSI][13] = 127,
[0][0][0][0][RTW89_MKK][13] = 76,
@@ -43706,6 +43719,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][13] = 127,
[0][0][0][0][RTW89_CN][13] = 127,
[0][0][0][0][RTW89_QATAR][13] = 127,
+ [0][0][0][0][RTW89_UK][13] = 127,
[0][1][0][0][RTW89_FCC][0] = 74,
[0][1][0][0][RTW89_ETSI][0] = 44,
[0][1][0][0][RTW89_MKK][0] = 56,
@@ -43717,6 +43731,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][0] = 74,
[0][1][0][0][RTW89_CN][0] = 44,
[0][1][0][0][RTW89_QATAR][0] = 44,
+ [0][1][0][0][RTW89_UK][0] = 44,
[0][1][0][0][RTW89_FCC][1] = 76,
[0][1][0][0][RTW89_ETSI][1] = 44,
[0][1][0][0][RTW89_MKK][1] = 56,
@@ -43728,6 +43743,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][1] = 76,
[0][1][0][0][RTW89_CN][1] = 44,
[0][1][0][0][RTW89_QATAR][1] = 44,
+ [0][1][0][0][RTW89_UK][1] = 44,
[0][1][0][0][RTW89_FCC][2] = 76,
[0][1][0][0][RTW89_ETSI][2] = 44,
[0][1][0][0][RTW89_MKK][2] = 56,
@@ -43739,6 +43755,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][2] = 76,
[0][1][0][0][RTW89_CN][2] = 44,
[0][1][0][0][RTW89_QATAR][2] = 44,
+ [0][1][0][0][RTW89_UK][2] = 44,
[0][1][0][0][RTW89_FCC][3] = 76,
[0][1][0][0][RTW89_ETSI][3] = 44,
[0][1][0][0][RTW89_MKK][3] = 56,
@@ -43750,6 +43767,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][3] = 76,
[0][1][0][0][RTW89_CN][3] = 44,
[0][1][0][0][RTW89_QATAR][3] = 44,
+ [0][1][0][0][RTW89_UK][3] = 44,
[0][1][0][0][RTW89_FCC][4] = 76,
[0][1][0][0][RTW89_ETSI][4] = 44,
[0][1][0][0][RTW89_MKK][4] = 56,
@@ -43761,6 +43779,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][4] = 76,
[0][1][0][0][RTW89_CN][4] = 44,
[0][1][0][0][RTW89_QATAR][4] = 44,
+ [0][1][0][0][RTW89_UK][4] = 44,
[0][1][0][0][RTW89_FCC][5] = 76,
[0][1][0][0][RTW89_ETSI][5] = 44,
[0][1][0][0][RTW89_MKK][5] = 56,
@@ -43772,6 +43791,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][5] = 76,
[0][1][0][0][RTW89_CN][5] = 44,
[0][1][0][0][RTW89_QATAR][5] = 44,
+ [0][1][0][0][RTW89_UK][5] = 44,
[0][1][0][0][RTW89_FCC][6] = 76,
[0][1][0][0][RTW89_ETSI][6] = 44,
[0][1][0][0][RTW89_MKK][6] = 56,
@@ -43783,6 +43803,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][6] = 76,
[0][1][0][0][RTW89_CN][6] = 44,
[0][1][0][0][RTW89_QATAR][6] = 44,
+ [0][1][0][0][RTW89_UK][6] = 44,
[0][1][0][0][RTW89_FCC][7] = 76,
[0][1][0][0][RTW89_ETSI][7] = 44,
[0][1][0][0][RTW89_MKK][7] = 56,
@@ -43794,6 +43815,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][7] = 76,
[0][1][0][0][RTW89_CN][7] = 44,
[0][1][0][0][RTW89_QATAR][7] = 44,
+ [0][1][0][0][RTW89_UK][7] = 44,
[0][1][0][0][RTW89_FCC][8] = 76,
[0][1][0][0][RTW89_ETSI][8] = 44,
[0][1][0][0][RTW89_MKK][8] = 56,
@@ -43805,6 +43827,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][8] = 76,
[0][1][0][0][RTW89_CN][8] = 44,
[0][1][0][0][RTW89_QATAR][8] = 44,
+ [0][1][0][0][RTW89_UK][8] = 44,
[0][1][0][0][RTW89_FCC][9] = 76,
[0][1][0][0][RTW89_ETSI][9] = 44,
[0][1][0][0][RTW89_MKK][9] = 56,
@@ -43816,6 +43839,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][9] = 76,
[0][1][0][0][RTW89_CN][9] = 44,
[0][1][0][0][RTW89_QATAR][9] = 44,
+ [0][1][0][0][RTW89_UK][9] = 44,
[0][1][0][0][RTW89_FCC][10] = 62,
[0][1][0][0][RTW89_ETSI][10] = 44,
[0][1][0][0][RTW89_MKK][10] = 56,
@@ -43827,6 +43851,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][10] = 62,
[0][1][0][0][RTW89_CN][10] = 44,
[0][1][0][0][RTW89_QATAR][10] = 44,
+ [0][1][0][0][RTW89_UK][10] = 44,
[0][1][0][0][RTW89_FCC][11] = 52,
[0][1][0][0][RTW89_ETSI][11] = 44,
[0][1][0][0][RTW89_MKK][11] = 56,
@@ -43838,6 +43863,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][11] = 52,
[0][1][0][0][RTW89_CN][11] = 44,
[0][1][0][0][RTW89_QATAR][11] = 44,
+ [0][1][0][0][RTW89_UK][11] = 44,
[0][1][0][0][RTW89_FCC][12] = 38,
[0][1][0][0][RTW89_ETSI][12] = 44,
[0][1][0][0][RTW89_MKK][12] = 56,
@@ -43849,6 +43875,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][12] = 38,
[0][1][0][0][RTW89_CN][12] = 44,
[0][1][0][0][RTW89_QATAR][12] = 44,
+ [0][1][0][0][RTW89_UK][12] = 44,
[0][1][0][0][RTW89_FCC][13] = 127,
[0][1][0][0][RTW89_ETSI][13] = 127,
[0][1][0][0][RTW89_MKK][13] = 64,
@@ -43860,6 +43887,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][13] = 127,
[0][1][0][0][RTW89_CN][13] = 127,
[0][1][0][0][RTW89_QATAR][13] = 127,
+ [0][1][0][0][RTW89_UK][13] = 127,
[1][0][0][0][RTW89_FCC][0] = 127,
[1][0][0][0][RTW89_ETSI][0] = 127,
[1][0][0][0][RTW89_MKK][0] = 127,
@@ -43871,6 +43899,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][0] = 127,
[1][0][0][0][RTW89_CN][0] = 127,
[1][0][0][0][RTW89_QATAR][0] = 127,
+ [1][0][0][0][RTW89_UK][0] = 127,
[1][0][0][0][RTW89_FCC][1] = 127,
[1][0][0][0][RTW89_ETSI][1] = 127,
[1][0][0][0][RTW89_MKK][1] = 127,
@@ -43882,6 +43911,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][1] = 127,
[1][0][0][0][RTW89_CN][1] = 127,
[1][0][0][0][RTW89_QATAR][1] = 127,
+ [1][0][0][0][RTW89_UK][1] = 127,
[1][0][0][0][RTW89_FCC][2] = 60,
[1][0][0][0][RTW89_ETSI][2] = 58,
[1][0][0][0][RTW89_MKK][2] = 68,
@@ -43893,6 +43923,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][2] = 60,
[1][0][0][0][RTW89_CN][2] = 58,
[1][0][0][0][RTW89_QATAR][2] = 58,
+ [1][0][0][0][RTW89_UK][2] = 58,
[1][0][0][0][RTW89_FCC][3] = 60,
[1][0][0][0][RTW89_ETSI][3] = 58,
[1][0][0][0][RTW89_MKK][3] = 68,
@@ -43904,6 +43935,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][3] = 60,
[1][0][0][0][RTW89_CN][3] = 58,
[1][0][0][0][RTW89_QATAR][3] = 58,
+ [1][0][0][0][RTW89_UK][3] = 58,
[1][0][0][0][RTW89_FCC][4] = 60,
[1][0][0][0][RTW89_ETSI][4] = 58,
[1][0][0][0][RTW89_MKK][4] = 68,
@@ -43915,6 +43947,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][4] = 60,
[1][0][0][0][RTW89_CN][4] = 58,
[1][0][0][0][RTW89_QATAR][4] = 58,
+ [1][0][0][0][RTW89_UK][4] = 58,
[1][0][0][0][RTW89_FCC][5] = 60,
[1][0][0][0][RTW89_ETSI][5] = 58,
[1][0][0][0][RTW89_MKK][5] = 68,
@@ -43926,6 +43959,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][5] = 60,
[1][0][0][0][RTW89_CN][5] = 58,
[1][0][0][0][RTW89_QATAR][5] = 58,
+ [1][0][0][0][RTW89_UK][5] = 58,
[1][0][0][0][RTW89_FCC][6] = 46,
[1][0][0][0][RTW89_ETSI][6] = 58,
[1][0][0][0][RTW89_MKK][6] = 68,
@@ -43937,6 +43971,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][6] = 46,
[1][0][0][0][RTW89_CN][6] = 58,
[1][0][0][0][RTW89_QATAR][6] = 58,
+ [1][0][0][0][RTW89_UK][6] = 58,
[1][0][0][0][RTW89_FCC][7] = 46,
[1][0][0][0][RTW89_ETSI][7] = 58,
[1][0][0][0][RTW89_MKK][7] = 68,
@@ -43948,6 +43983,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][7] = 46,
[1][0][0][0][RTW89_CN][7] = 58,
[1][0][0][0][RTW89_QATAR][7] = 58,
+ [1][0][0][0][RTW89_UK][7] = 58,
[1][0][0][0][RTW89_FCC][8] = 46,
[1][0][0][0][RTW89_ETSI][8] = 58,
[1][0][0][0][RTW89_MKK][8] = 68,
@@ -43959,6 +43995,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][8] = 46,
[1][0][0][0][RTW89_CN][8] = 58,
[1][0][0][0][RTW89_QATAR][8] = 58,
+ [1][0][0][0][RTW89_UK][8] = 58,
[1][0][0][0][RTW89_FCC][9] = 32,
[1][0][0][0][RTW89_ETSI][9] = 58,
[1][0][0][0][RTW89_MKK][9] = 68,
@@ -43970,6 +44007,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][9] = 32,
[1][0][0][0][RTW89_CN][9] = 58,
[1][0][0][0][RTW89_QATAR][9] = 58,
+ [1][0][0][0][RTW89_UK][9] = 58,
[1][0][0][0][RTW89_FCC][10] = 32,
[1][0][0][0][RTW89_ETSI][10] = 58,
[1][0][0][0][RTW89_MKK][10] = 68,
@@ -43981,6 +44019,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][10] = 32,
[1][0][0][0][RTW89_CN][10] = 58,
[1][0][0][0][RTW89_QATAR][10] = 58,
+ [1][0][0][0][RTW89_UK][10] = 58,
[1][0][0][0][RTW89_FCC][11] = 127,
[1][0][0][0][RTW89_ETSI][11] = 127,
[1][0][0][0][RTW89_MKK][11] = 127,
@@ -43992,6 +44031,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][11] = 127,
[1][0][0][0][RTW89_CN][11] = 127,
[1][0][0][0][RTW89_QATAR][11] = 127,
+ [1][0][0][0][RTW89_UK][11] = 127,
[1][0][0][0][RTW89_FCC][12] = 127,
[1][0][0][0][RTW89_ETSI][12] = 127,
[1][0][0][0][RTW89_MKK][12] = 127,
@@ -44003,6 +44043,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][12] = 127,
[1][0][0][0][RTW89_CN][12] = 127,
[1][0][0][0][RTW89_QATAR][12] = 127,
+ [1][0][0][0][RTW89_UK][12] = 127,
[1][0][0][0][RTW89_FCC][13] = 127,
[1][0][0][0][RTW89_ETSI][13] = 127,
[1][0][0][0][RTW89_MKK][13] = 127,
@@ -44014,6 +44055,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][13] = 127,
[1][0][0][0][RTW89_CN][13] = 127,
[1][0][0][0][RTW89_QATAR][13] = 127,
+ [1][0][0][0][RTW89_UK][13] = 127,
[1][1][0][0][RTW89_FCC][0] = 127,
[1][1][0][0][RTW89_ETSI][0] = 127,
[1][1][0][0][RTW89_MKK][0] = 127,
@@ -44025,6 +44067,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][0] = 127,
[1][1][0][0][RTW89_CN][0] = 127,
[1][1][0][0][RTW89_QATAR][0] = 127,
+ [1][1][0][0][RTW89_UK][0] = 127,
[1][1][0][0][RTW89_FCC][1] = 127,
[1][1][0][0][RTW89_ETSI][1] = 127,
[1][1][0][0][RTW89_MKK][1] = 127,
@@ -44036,6 +44079,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][1] = 127,
[1][1][0][0][RTW89_CN][1] = 127,
[1][1][0][0][RTW89_QATAR][1] = 127,
+ [1][1][0][0][RTW89_UK][1] = 127,
[1][1][0][0][RTW89_FCC][2] = 48,
[1][1][0][0][RTW89_ETSI][2] = 46,
[1][1][0][0][RTW89_MKK][2] = 56,
@@ -44047,6 +44091,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][2] = 48,
[1][1][0][0][RTW89_CN][2] = 46,
[1][1][0][0][RTW89_QATAR][2] = 46,
+ [1][1][0][0][RTW89_UK][2] = 46,
[1][1][0][0][RTW89_FCC][3] = 48,
[1][1][0][0][RTW89_ETSI][3] = 46,
[1][1][0][0][RTW89_MKK][3] = 56,
@@ -44058,6 +44103,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][3] = 48,
[1][1][0][0][RTW89_CN][3] = 46,
[1][1][0][0][RTW89_QATAR][3] = 46,
+ [1][1][0][0][RTW89_UK][3] = 46,
[1][1][0][0][RTW89_FCC][4] = 48,
[1][1][0][0][RTW89_ETSI][4] = 46,
[1][1][0][0][RTW89_MKK][4] = 56,
@@ -44069,6 +44115,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][4] = 48,
[1][1][0][0][RTW89_CN][4] = 46,
[1][1][0][0][RTW89_QATAR][4] = 46,
+ [1][1][0][0][RTW89_UK][4] = 46,
[1][1][0][0][RTW89_FCC][5] = 58,
[1][1][0][0][RTW89_ETSI][5] = 46,
[1][1][0][0][RTW89_MKK][5] = 56,
@@ -44080,6 +44127,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][5] = 58,
[1][1][0][0][RTW89_CN][5] = 46,
[1][1][0][0][RTW89_QATAR][5] = 46,
+ [1][1][0][0][RTW89_UK][5] = 46,
[1][1][0][0][RTW89_FCC][6] = 46,
[1][1][0][0][RTW89_ETSI][6] = 46,
[1][1][0][0][RTW89_MKK][6] = 56,
@@ -44091,6 +44139,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][6] = 46,
[1][1][0][0][RTW89_CN][6] = 46,
[1][1][0][0][RTW89_QATAR][6] = 46,
+ [1][1][0][0][RTW89_UK][6] = 46,
[1][1][0][0][RTW89_FCC][7] = 46,
[1][1][0][0][RTW89_ETSI][7] = 46,
[1][1][0][0][RTW89_MKK][7] = 56,
@@ -44102,6 +44151,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][7] = 46,
[1][1][0][0][RTW89_CN][7] = 46,
[1][1][0][0][RTW89_QATAR][7] = 46,
+ [1][1][0][0][RTW89_UK][7] = 46,
[1][1][0][0][RTW89_FCC][8] = 46,
[1][1][0][0][RTW89_ETSI][8] = 46,
[1][1][0][0][RTW89_MKK][8] = 56,
@@ -44113,6 +44163,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][8] = 46,
[1][1][0][0][RTW89_CN][8] = 46,
[1][1][0][0][RTW89_QATAR][8] = 46,
+ [1][1][0][0][RTW89_UK][8] = 46,
[1][1][0][0][RTW89_FCC][9] = 24,
[1][1][0][0][RTW89_ETSI][9] = 46,
[1][1][0][0][RTW89_MKK][9] = 56,
@@ -44124,6 +44175,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][9] = 24,
[1][1][0][0][RTW89_CN][9] = 46,
[1][1][0][0][RTW89_QATAR][9] = 46,
+ [1][1][0][0][RTW89_UK][9] = 46,
[1][1][0][0][RTW89_FCC][10] = 24,
[1][1][0][0][RTW89_ETSI][10] = 46,
[1][1][0][0][RTW89_MKK][10] = 56,
@@ -44135,6 +44187,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][10] = 24,
[1][1][0][0][RTW89_CN][10] = 46,
[1][1][0][0][RTW89_QATAR][10] = 46,
+ [1][1][0][0][RTW89_UK][10] = 46,
[1][1][0][0][RTW89_FCC][11] = 127,
[1][1][0][0][RTW89_ETSI][11] = 127,
[1][1][0][0][RTW89_MKK][11] = 127,
@@ -44146,6 +44199,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][11] = 127,
[1][1][0][0][RTW89_CN][11] = 127,
[1][1][0][0][RTW89_QATAR][11] = 127,
+ [1][1][0][0][RTW89_UK][11] = 127,
[1][1][0][0][RTW89_FCC][12] = 127,
[1][1][0][0][RTW89_ETSI][12] = 127,
[1][1][0][0][RTW89_MKK][12] = 127,
@@ -44157,6 +44211,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][12] = 127,
[1][1][0][0][RTW89_CN][12] = 127,
[1][1][0][0][RTW89_QATAR][12] = 127,
+ [1][1][0][0][RTW89_UK][12] = 127,
[1][1][0][0][RTW89_FCC][13] = 127,
[1][1][0][0][RTW89_ETSI][13] = 127,
[1][1][0][0][RTW89_MKK][13] = 127,
@@ -44168,6 +44223,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][13] = 127,
[1][1][0][0][RTW89_CN][13] = 127,
[1][1][0][0][RTW89_QATAR][13] = 127,
+ [1][1][0][0][RTW89_UK][13] = 127,
[0][0][1][0][RTW89_FCC][0] = 66,
[0][0][1][0][RTW89_ETSI][0] = 58,
[0][0][1][0][RTW89_MKK][0] = 76,
@@ -44179,6 +44235,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][0] = 66,
[0][0][1][0][RTW89_CN][0] = 58,
[0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 58,
[0][0][1][0][RTW89_FCC][1] = 66,
[0][0][1][0][RTW89_ETSI][1] = 58,
[0][0][1][0][RTW89_MKK][1] = 76,
@@ -44190,6 +44247,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][1] = 66,
[0][0][1][0][RTW89_CN][1] = 58,
[0][0][1][0][RTW89_QATAR][1] = 58,
+ [0][0][1][0][RTW89_UK][1] = 58,
[0][0][1][0][RTW89_FCC][2] = 70,
[0][0][1][0][RTW89_ETSI][2] = 58,
[0][0][1][0][RTW89_MKK][2] = 76,
@@ -44201,6 +44259,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][2] = 70,
[0][0][1][0][RTW89_CN][2] = 58,
[0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 58,
[0][0][1][0][RTW89_FCC][3] = 74,
[0][0][1][0][RTW89_ETSI][3] = 58,
[0][0][1][0][RTW89_MKK][3] = 76,
@@ -44212,6 +44271,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][3] = 74,
[0][0][1][0][RTW89_CN][3] = 58,
[0][0][1][0][RTW89_QATAR][3] = 58,
+ [0][0][1][0][RTW89_UK][3] = 58,
[0][0][1][0][RTW89_FCC][4] = 78,
[0][0][1][0][RTW89_ETSI][4] = 58,
[0][0][1][0][RTW89_MKK][4] = 76,
@@ -44223,6 +44283,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][4] = 78,
[0][0][1][0][RTW89_CN][4] = 58,
[0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 58,
[0][0][1][0][RTW89_FCC][5] = 78,
[0][0][1][0][RTW89_ETSI][5] = 58,
[0][0][1][0][RTW89_MKK][5] = 76,
@@ -44234,6 +44295,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][5] = 78,
[0][0][1][0][RTW89_CN][5] = 58,
[0][0][1][0][RTW89_QATAR][5] = 58,
+ [0][0][1][0][RTW89_UK][5] = 58,
[0][0][1][0][RTW89_FCC][6] = 78,
[0][0][1][0][RTW89_ETSI][6] = 58,
[0][0][1][0][RTW89_MKK][6] = 76,
@@ -44245,6 +44307,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][6] = 78,
[0][0][1][0][RTW89_CN][6] = 58,
[0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 58,
[0][0][1][0][RTW89_FCC][7] = 74,
[0][0][1][0][RTW89_ETSI][7] = 58,
[0][0][1][0][RTW89_MKK][7] = 76,
@@ -44256,6 +44319,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][7] = 74,
[0][0][1][0][RTW89_CN][7] = 58,
[0][0][1][0][RTW89_QATAR][7] = 58,
+ [0][0][1][0][RTW89_UK][7] = 58,
[0][0][1][0][RTW89_FCC][8] = 70,
[0][0][1][0][RTW89_ETSI][8] = 58,
[0][0][1][0][RTW89_MKK][8] = 76,
@@ -44267,6 +44331,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][8] = 70,
[0][0][1][0][RTW89_CN][8] = 58,
[0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 58,
[0][0][1][0][RTW89_FCC][9] = 66,
[0][0][1][0][RTW89_ETSI][9] = 58,
[0][0][1][0][RTW89_MKK][9] = 76,
@@ -44278,6 +44343,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][9] = 66,
[0][0][1][0][RTW89_CN][9] = 58,
[0][0][1][0][RTW89_QATAR][9] = 58,
+ [0][0][1][0][RTW89_UK][9] = 58,
[0][0][1][0][RTW89_FCC][10] = 66,
[0][0][1][0][RTW89_ETSI][10] = 58,
[0][0][1][0][RTW89_MKK][10] = 76,
@@ -44289,6 +44355,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][10] = 66,
[0][0][1][0][RTW89_CN][10] = 58,
[0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 58,
[0][0][1][0][RTW89_FCC][11] = 56,
[0][0][1][0][RTW89_ETSI][11] = 58,
[0][0][1][0][RTW89_MKK][11] = 76,
@@ -44300,6 +44367,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][11] = 56,
[0][0][1][0][RTW89_CN][11] = 58,
[0][0][1][0][RTW89_QATAR][11] = 58,
+ [0][0][1][0][RTW89_UK][11] = 58,
[0][0][1][0][RTW89_FCC][12] = 52,
[0][0][1][0][RTW89_ETSI][12] = 58,
[0][0][1][0][RTW89_MKK][12] = 76,
@@ -44311,6 +44379,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][12] = 52,
[0][0][1][0][RTW89_CN][12] = 58,
[0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 58,
[0][0][1][0][RTW89_FCC][13] = 127,
[0][0][1][0][RTW89_ETSI][13] = 127,
[0][0][1][0][RTW89_MKK][13] = 127,
@@ -44322,6 +44391,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][13] = 127,
[0][0][1][0][RTW89_CN][13] = 127,
[0][0][1][0][RTW89_QATAR][13] = 127,
+ [0][0][1][0][RTW89_UK][13] = 127,
[0][1][1][0][RTW89_FCC][0] = 62,
[0][1][1][0][RTW89_ETSI][0] = 46,
[0][1][1][0][RTW89_MKK][0] = 64,
@@ -44333,6 +44403,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 62,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][1] = 62,
[0][1][1][0][RTW89_ETSI][1] = 46,
[0][1][1][0][RTW89_MKK][1] = 64,
@@ -44344,6 +44415,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][1] = 62,
[0][1][1][0][RTW89_CN][1] = 46,
[0][1][1][0][RTW89_QATAR][1] = 46,
+ [0][1][1][0][RTW89_UK][1] = 46,
[0][1][1][0][RTW89_FCC][2] = 66,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 64,
@@ -44355,6 +44427,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][2] = 66,
[0][1][1][0][RTW89_CN][2] = 46,
[0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 46,
[0][1][1][0][RTW89_FCC][3] = 70,
[0][1][1][0][RTW89_ETSI][3] = 46,
[0][1][1][0][RTW89_MKK][3] = 64,
@@ -44366,6 +44439,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][3] = 70,
[0][1][1][0][RTW89_CN][3] = 46,
[0][1][1][0][RTW89_QATAR][3] = 46,
+ [0][1][1][0][RTW89_UK][3] = 46,
[0][1][1][0][RTW89_FCC][4] = 78,
[0][1][1][0][RTW89_ETSI][4] = 46,
[0][1][1][0][RTW89_MKK][4] = 64,
@@ -44377,6 +44451,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][4] = 78,
[0][1][1][0][RTW89_CN][4] = 46,
[0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 46,
[0][1][1][0][RTW89_FCC][5] = 78,
[0][1][1][0][RTW89_ETSI][5] = 46,
[0][1][1][0][RTW89_MKK][5] = 64,
@@ -44388,6 +44463,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][5] = 78,
[0][1][1][0][RTW89_CN][5] = 46,
[0][1][1][0][RTW89_QATAR][5] = 46,
+ [0][1][1][0][RTW89_UK][5] = 46,
[0][1][1][0][RTW89_FCC][6] = 78,
[0][1][1][0][RTW89_ETSI][6] = 46,
[0][1][1][0][RTW89_MKK][6] = 64,
@@ -44399,6 +44475,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][6] = 78,
[0][1][1][0][RTW89_CN][6] = 46,
[0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 46,
[0][1][1][0][RTW89_FCC][7] = 70,
[0][1][1][0][RTW89_ETSI][7] = 46,
[0][1][1][0][RTW89_MKK][7] = 64,
@@ -44410,6 +44487,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][7] = 70,
[0][1][1][0][RTW89_CN][7] = 46,
[0][1][1][0][RTW89_QATAR][7] = 46,
+ [0][1][1][0][RTW89_UK][7] = 46,
[0][1][1][0][RTW89_FCC][8] = 66,
[0][1][1][0][RTW89_ETSI][8] = 46,
[0][1][1][0][RTW89_MKK][8] = 64,
@@ -44421,6 +44499,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][8] = 66,
[0][1][1][0][RTW89_CN][8] = 46,
[0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 46,
[0][1][1][0][RTW89_FCC][9] = 62,
[0][1][1][0][RTW89_ETSI][9] = 46,
[0][1][1][0][RTW89_MKK][9] = 64,
@@ -44432,6 +44511,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][9] = 62,
[0][1][1][0][RTW89_CN][9] = 46,
[0][1][1][0][RTW89_QATAR][9] = 46,
+ [0][1][1][0][RTW89_UK][9] = 46,
[0][1][1][0][RTW89_FCC][10] = 62,
[0][1][1][0][RTW89_ETSI][10] = 46,
[0][1][1][0][RTW89_MKK][10] = 64,
@@ -44443,6 +44523,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][10] = 62,
[0][1][1][0][RTW89_CN][10] = 46,
[0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 46,
[0][1][1][0][RTW89_FCC][11] = 42,
[0][1][1][0][RTW89_ETSI][11] = 46,
[0][1][1][0][RTW89_MKK][11] = 64,
@@ -44454,6 +44535,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][11] = 42,
[0][1][1][0][RTW89_CN][11] = 46,
[0][1][1][0][RTW89_QATAR][11] = 46,
+ [0][1][1][0][RTW89_UK][11] = 46,
[0][1][1][0][RTW89_FCC][12] = 40,
[0][1][1][0][RTW89_ETSI][12] = 46,
[0][1][1][0][RTW89_MKK][12] = 64,
@@ -44465,6 +44547,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][12] = 40,
[0][1][1][0][RTW89_CN][12] = 46,
[0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 46,
[0][1][1][0][RTW89_FCC][13] = 127,
[0][1][1][0][RTW89_ETSI][13] = 127,
[0][1][1][0][RTW89_MKK][13] = 127,
@@ -44476,6 +44559,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][13] = 127,
[0][1][1][0][RTW89_CN][13] = 127,
[0][1][1][0][RTW89_QATAR][13] = 127,
+ [0][1][1][0][RTW89_UK][13] = 127,
[0][0][2][0][RTW89_FCC][0] = 66,
[0][0][2][0][RTW89_ETSI][0] = 58,
[0][0][2][0][RTW89_MKK][0] = 76,
@@ -44487,6 +44571,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][0] = 66,
[0][0][2][0][RTW89_CN][0] = 58,
[0][0][2][0][RTW89_QATAR][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 58,
[0][0][2][0][RTW89_FCC][1] = 66,
[0][0][2][0][RTW89_ETSI][1] = 58,
[0][0][2][0][RTW89_MKK][1] = 76,
@@ -44498,6 +44583,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][1] = 66,
[0][0][2][0][RTW89_CN][1] = 58,
[0][0][2][0][RTW89_QATAR][1] = 58,
+ [0][0][2][0][RTW89_UK][1] = 58,
[0][0][2][0][RTW89_FCC][2] = 70,
[0][0][2][0][RTW89_ETSI][2] = 58,
[0][0][2][0][RTW89_MKK][2] = 76,
@@ -44509,6 +44595,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][2] = 70,
[0][0][2][0][RTW89_CN][2] = 58,
[0][0][2][0][RTW89_QATAR][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 58,
[0][0][2][0][RTW89_FCC][3] = 74,
[0][0][2][0][RTW89_ETSI][3] = 58,
[0][0][2][0][RTW89_MKK][3] = 76,
@@ -44520,6 +44607,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][3] = 74,
[0][0][2][0][RTW89_CN][3] = 58,
[0][0][2][0][RTW89_QATAR][3] = 58,
+ [0][0][2][0][RTW89_UK][3] = 58,
[0][0][2][0][RTW89_FCC][4] = 76,
[0][0][2][0][RTW89_ETSI][4] = 58,
[0][0][2][0][RTW89_MKK][4] = 76,
@@ -44531,6 +44619,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][4] = 76,
[0][0][2][0][RTW89_CN][4] = 58,
[0][0][2][0][RTW89_QATAR][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 58,
[0][0][2][0][RTW89_FCC][5] = 76,
[0][0][2][0][RTW89_ETSI][5] = 58,
[0][0][2][0][RTW89_MKK][5] = 76,
@@ -44542,6 +44631,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][5] = 76,
[0][0][2][0][RTW89_CN][5] = 58,
[0][0][2][0][RTW89_QATAR][5] = 58,
+ [0][0][2][0][RTW89_UK][5] = 58,
[0][0][2][0][RTW89_FCC][6] = 76,
[0][0][2][0][RTW89_ETSI][6] = 58,
[0][0][2][0][RTW89_MKK][6] = 76,
@@ -44553,6 +44643,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][6] = 76,
[0][0][2][0][RTW89_CN][6] = 58,
[0][0][2][0][RTW89_QATAR][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 58,
[0][0][2][0][RTW89_FCC][7] = 74,
[0][0][2][0][RTW89_ETSI][7] = 58,
[0][0][2][0][RTW89_MKK][7] = 76,
@@ -44564,6 +44655,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][7] = 74,
[0][0][2][0][RTW89_CN][7] = 58,
[0][0][2][0][RTW89_QATAR][7] = 58,
+ [0][0][2][0][RTW89_UK][7] = 58,
[0][0][2][0][RTW89_FCC][8] = 70,
[0][0][2][0][RTW89_ETSI][8] = 58,
[0][0][2][0][RTW89_MKK][8] = 76,
@@ -44575,6 +44667,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][8] = 70,
[0][0][2][0][RTW89_CN][8] = 58,
[0][0][2][0][RTW89_QATAR][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 58,
[0][0][2][0][RTW89_FCC][9] = 66,
[0][0][2][0][RTW89_ETSI][9] = 58,
[0][0][2][0][RTW89_MKK][9] = 76,
@@ -44586,6 +44679,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][9] = 66,
[0][0][2][0][RTW89_CN][9] = 58,
[0][0][2][0][RTW89_QATAR][9] = 58,
+ [0][0][2][0][RTW89_UK][9] = 58,
[0][0][2][0][RTW89_FCC][10] = 66,
[0][0][2][0][RTW89_ETSI][10] = 58,
[0][0][2][0][RTW89_MKK][10] = 76,
@@ -44597,6 +44691,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][10] = 66,
[0][0][2][0][RTW89_CN][10] = 58,
[0][0][2][0][RTW89_QATAR][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 58,
[0][0][2][0][RTW89_FCC][11] = 54,
[0][0][2][0][RTW89_ETSI][11] = 58,
[0][0][2][0][RTW89_MKK][11] = 76,
@@ -44608,6 +44703,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][11] = 54,
[0][0][2][0][RTW89_CN][11] = 58,
[0][0][2][0][RTW89_QATAR][11] = 58,
+ [0][0][2][0][RTW89_UK][11] = 58,
[0][0][2][0][RTW89_FCC][12] = 50,
[0][0][2][0][RTW89_ETSI][12] = 58,
[0][0][2][0][RTW89_MKK][12] = 76,
@@ -44619,6 +44715,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][12] = 50,
[0][0][2][0][RTW89_CN][12] = 58,
[0][0][2][0][RTW89_QATAR][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 58,
[0][0][2][0][RTW89_FCC][13] = 127,
[0][0][2][0][RTW89_ETSI][13] = 127,
[0][0][2][0][RTW89_MKK][13] = 127,
@@ -44630,6 +44727,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][13] = 127,
[0][0][2][0][RTW89_CN][13] = 127,
[0][0][2][0][RTW89_QATAR][13] = 127,
+ [0][0][2][0][RTW89_UK][13] = 127,
[0][1][2][0][RTW89_FCC][0] = 62,
[0][1][2][0][RTW89_ETSI][0] = 46,
[0][1][2][0][RTW89_MKK][0] = 64,
@@ -44641,6 +44739,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][0] = 62,
[0][1][2][0][RTW89_CN][0] = 46,
[0][1][2][0][RTW89_QATAR][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 46,
[0][1][2][0][RTW89_FCC][1] = 62,
[0][1][2][0][RTW89_ETSI][1] = 46,
[0][1][2][0][RTW89_MKK][1] = 64,
@@ -44652,6 +44751,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][1] = 62,
[0][1][2][0][RTW89_CN][1] = 46,
[0][1][2][0][RTW89_QATAR][1] = 46,
+ [0][1][2][0][RTW89_UK][1] = 46,
[0][1][2][0][RTW89_FCC][2] = 66,
[0][1][2][0][RTW89_ETSI][2] = 46,
[0][1][2][0][RTW89_MKK][2] = 64,
@@ -44663,6 +44763,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][2] = 66,
[0][1][2][0][RTW89_CN][2] = 46,
[0][1][2][0][RTW89_QATAR][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 46,
[0][1][2][0][RTW89_FCC][3] = 70,
[0][1][2][0][RTW89_ETSI][3] = 46,
[0][1][2][0][RTW89_MKK][3] = 64,
@@ -44674,6 +44775,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][3] = 70,
[0][1][2][0][RTW89_CN][3] = 46,
[0][1][2][0][RTW89_QATAR][3] = 46,
+ [0][1][2][0][RTW89_UK][3] = 46,
[0][1][2][0][RTW89_FCC][4] = 76,
[0][1][2][0][RTW89_ETSI][4] = 46,
[0][1][2][0][RTW89_MKK][4] = 64,
@@ -44685,6 +44787,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][4] = 76,
[0][1][2][0][RTW89_CN][4] = 46,
[0][1][2][0][RTW89_QATAR][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 46,
[0][1][2][0][RTW89_FCC][5] = 76,
[0][1][2][0][RTW89_ETSI][5] = 46,
[0][1][2][0][RTW89_MKK][5] = 64,
@@ -44696,6 +44799,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][5] = 76,
[0][1][2][0][RTW89_CN][5] = 46,
[0][1][2][0][RTW89_QATAR][5] = 46,
+ [0][1][2][0][RTW89_UK][5] = 46,
[0][1][2][0][RTW89_FCC][6] = 76,
[0][1][2][0][RTW89_ETSI][6] = 46,
[0][1][2][0][RTW89_MKK][6] = 64,
@@ -44707,6 +44811,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][6] = 76,
[0][1][2][0][RTW89_CN][6] = 46,
[0][1][2][0][RTW89_QATAR][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 46,
[0][1][2][0][RTW89_FCC][7] = 68,
[0][1][2][0][RTW89_ETSI][7] = 46,
[0][1][2][0][RTW89_MKK][7] = 64,
@@ -44718,6 +44823,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][7] = 68,
[0][1][2][0][RTW89_CN][7] = 46,
[0][1][2][0][RTW89_QATAR][7] = 46,
+ [0][1][2][0][RTW89_UK][7] = 46,
[0][1][2][0][RTW89_FCC][8] = 64,
[0][1][2][0][RTW89_ETSI][8] = 46,
[0][1][2][0][RTW89_MKK][8] = 64,
@@ -44729,6 +44835,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][8] = 64,
[0][1][2][0][RTW89_CN][8] = 46,
[0][1][2][0][RTW89_QATAR][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 46,
[0][1][2][0][RTW89_FCC][9] = 60,
[0][1][2][0][RTW89_ETSI][9] = 46,
[0][1][2][0][RTW89_MKK][9] = 64,
@@ -44740,6 +44847,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][9] = 60,
[0][1][2][0][RTW89_CN][9] = 46,
[0][1][2][0][RTW89_QATAR][9] = 46,
+ [0][1][2][0][RTW89_UK][9] = 46,
[0][1][2][0][RTW89_FCC][10] = 60,
[0][1][2][0][RTW89_ETSI][10] = 46,
[0][1][2][0][RTW89_MKK][10] = 64,
@@ -44751,6 +44859,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][10] = 60,
[0][1][2][0][RTW89_CN][10] = 46,
[0][1][2][0][RTW89_QATAR][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 46,
[0][1][2][0][RTW89_FCC][11] = 42,
[0][1][2][0][RTW89_ETSI][11] = 46,
[0][1][2][0][RTW89_MKK][11] = 64,
@@ -44762,6 +44871,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][11] = 42,
[0][1][2][0][RTW89_CN][11] = 46,
[0][1][2][0][RTW89_QATAR][11] = 46,
+ [0][1][2][0][RTW89_UK][11] = 46,
[0][1][2][0][RTW89_FCC][12] = 40,
[0][1][2][0][RTW89_ETSI][12] = 46,
[0][1][2][0][RTW89_MKK][12] = 64,
@@ -44773,6 +44883,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][12] = 40,
[0][1][2][0][RTW89_CN][12] = 46,
[0][1][2][0][RTW89_QATAR][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 46,
[0][1][2][0][RTW89_FCC][13] = 127,
[0][1][2][0][RTW89_ETSI][13] = 127,
[0][1][2][0][RTW89_MKK][13] = 127,
@@ -44784,6 +44895,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][13] = 127,
[0][1][2][0][RTW89_CN][13] = 127,
[0][1][2][0][RTW89_QATAR][13] = 127,
+ [0][1][2][0][RTW89_UK][13] = 127,
[0][1][2][1][RTW89_FCC][0] = 62,
[0][1][2][1][RTW89_ETSI][0] = 34,
[0][1][2][1][RTW89_MKK][0] = 64,
@@ -44795,6 +44907,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][0] = 62,
[0][1][2][1][RTW89_CN][0] = 34,
[0][1][2][1][RTW89_QATAR][0] = 34,
+ [0][1][2][1][RTW89_UK][0] = 34,
[0][1][2][1][RTW89_FCC][1] = 62,
[0][1][2][1][RTW89_ETSI][1] = 34,
[0][1][2][1][RTW89_MKK][1] = 64,
@@ -44806,6 +44919,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][1] = 62,
[0][1][2][1][RTW89_CN][1] = 34,
[0][1][2][1][RTW89_QATAR][1] = 34,
+ [0][1][2][1][RTW89_UK][1] = 34,
[0][1][2][1][RTW89_FCC][2] = 66,
[0][1][2][1][RTW89_ETSI][2] = 34,
[0][1][2][1][RTW89_MKK][2] = 64,
@@ -44817,6 +44931,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][2] = 66,
[0][1][2][1][RTW89_CN][2] = 34,
[0][1][2][1][RTW89_QATAR][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 34,
[0][1][2][1][RTW89_FCC][3] = 70,
[0][1][2][1][RTW89_ETSI][3] = 34,
[0][1][2][1][RTW89_MKK][3] = 64,
@@ -44828,6 +44943,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][3] = 70,
[0][1][2][1][RTW89_CN][3] = 34,
[0][1][2][1][RTW89_QATAR][3] = 34,
+ [0][1][2][1][RTW89_UK][3] = 34,
[0][1][2][1][RTW89_FCC][4] = 76,
[0][1][2][1][RTW89_ETSI][4] = 34,
[0][1][2][1][RTW89_MKK][4] = 64,
@@ -44839,6 +44955,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][4] = 76,
[0][1][2][1][RTW89_CN][4] = 34,
[0][1][2][1][RTW89_QATAR][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 34,
[0][1][2][1][RTW89_FCC][5] = 76,
[0][1][2][1][RTW89_ETSI][5] = 34,
[0][1][2][1][RTW89_MKK][5] = 64,
@@ -44850,6 +44967,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][5] = 76,
[0][1][2][1][RTW89_CN][5] = 34,
[0][1][2][1][RTW89_QATAR][5] = 34,
+ [0][1][2][1][RTW89_UK][5] = 34,
[0][1][2][1][RTW89_FCC][6] = 76,
[0][1][2][1][RTW89_ETSI][6] = 34,
[0][1][2][1][RTW89_MKK][6] = 64,
@@ -44861,6 +44979,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][6] = 76,
[0][1][2][1][RTW89_CN][6] = 34,
[0][1][2][1][RTW89_QATAR][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 34,
[0][1][2][1][RTW89_FCC][7] = 68,
[0][1][2][1][RTW89_ETSI][7] = 34,
[0][1][2][1][RTW89_MKK][7] = 64,
@@ -44872,6 +44991,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][7] = 68,
[0][1][2][1][RTW89_CN][7] = 34,
[0][1][2][1][RTW89_QATAR][7] = 34,
+ [0][1][2][1][RTW89_UK][7] = 34,
[0][1][2][1][RTW89_FCC][8] = 64,
[0][1][2][1][RTW89_ETSI][8] = 34,
[0][1][2][1][RTW89_MKK][8] = 64,
@@ -44883,6 +45003,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][8] = 64,
[0][1][2][1][RTW89_CN][8] = 34,
[0][1][2][1][RTW89_QATAR][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 34,
[0][1][2][1][RTW89_FCC][9] = 60,
[0][1][2][1][RTW89_ETSI][9] = 34,
[0][1][2][1][RTW89_MKK][9] = 64,
@@ -44894,6 +45015,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][9] = 60,
[0][1][2][1][RTW89_CN][9] = 34,
[0][1][2][1][RTW89_QATAR][9] = 34,
+ [0][1][2][1][RTW89_UK][9] = 34,
[0][1][2][1][RTW89_FCC][10] = 60,
[0][1][2][1][RTW89_ETSI][10] = 34,
[0][1][2][1][RTW89_MKK][10] = 64,
@@ -44905,6 +45027,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][10] = 60,
[0][1][2][1][RTW89_CN][10] = 34,
[0][1][2][1][RTW89_QATAR][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 34,
[0][1][2][1][RTW89_FCC][11] = 42,
[0][1][2][1][RTW89_ETSI][11] = 34,
[0][1][2][1][RTW89_MKK][11] = 64,
@@ -44916,6 +45039,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][11] = 42,
[0][1][2][1][RTW89_CN][11] = 34,
[0][1][2][1][RTW89_QATAR][11] = 34,
+ [0][1][2][1][RTW89_UK][11] = 34,
[0][1][2][1][RTW89_FCC][12] = 40,
[0][1][2][1][RTW89_ETSI][12] = 34,
[0][1][2][1][RTW89_MKK][12] = 64,
@@ -44927,6 +45051,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][12] = 40,
[0][1][2][1][RTW89_CN][12] = 34,
[0][1][2][1][RTW89_QATAR][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 34,
[0][1][2][1][RTW89_FCC][13] = 127,
[0][1][2][1][RTW89_ETSI][13] = 127,
[0][1][2][1][RTW89_MKK][13] = 127,
@@ -44938,6 +45063,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][13] = 127,
[0][1][2][1][RTW89_CN][13] = 127,
[0][1][2][1][RTW89_QATAR][13] = 127,
+ [0][1][2][1][RTW89_UK][13] = 127,
[1][0][2][0][RTW89_FCC][0] = 127,
[1][0][2][0][RTW89_ETSI][0] = 127,
[1][0][2][0][RTW89_MKK][0] = 127,
@@ -44949,6 +45075,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][0] = 127,
[1][0][2][0][RTW89_CN][0] = 127,
[1][0][2][0][RTW89_QATAR][0] = 127,
+ [1][0][2][0][RTW89_UK][0] = 127,
[1][0][2][0][RTW89_FCC][1] = 127,
[1][0][2][0][RTW89_ETSI][1] = 127,
[1][0][2][0][RTW89_MKK][1] = 127,
@@ -44960,6 +45087,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][1] = 127,
[1][0][2][0][RTW89_CN][1] = 127,
[1][0][2][0][RTW89_QATAR][1] = 127,
+ [1][0][2][0][RTW89_UK][1] = 127,
[1][0][2][0][RTW89_FCC][2] = 56,
[1][0][2][0][RTW89_ETSI][2] = 58,
[1][0][2][0][RTW89_MKK][2] = 68,
@@ -44971,6 +45099,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][2] = 56,
[1][0][2][0][RTW89_CN][2] = 58,
[1][0][2][0][RTW89_QATAR][2] = 58,
+ [1][0][2][0][RTW89_UK][2] = 58,
[1][0][2][0][RTW89_FCC][3] = 56,
[1][0][2][0][RTW89_ETSI][3] = 58,
[1][0][2][0][RTW89_MKK][3] = 68,
@@ -44982,6 +45111,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][3] = 56,
[1][0][2][0][RTW89_CN][3] = 58,
[1][0][2][0][RTW89_QATAR][3] = 58,
+ [1][0][2][0][RTW89_UK][3] = 58,
[1][0][2][0][RTW89_FCC][4] = 60,
[1][0][2][0][RTW89_ETSI][4] = 58,
[1][0][2][0][RTW89_MKK][4] = 68,
@@ -44993,6 +45123,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][4] = 60,
[1][0][2][0][RTW89_CN][4] = 58,
[1][0][2][0][RTW89_QATAR][4] = 58,
+ [1][0][2][0][RTW89_UK][4] = 58,
[1][0][2][0][RTW89_FCC][5] = 64,
[1][0][2][0][RTW89_ETSI][5] = 58,
[1][0][2][0][RTW89_MKK][5] = 68,
@@ -45004,6 +45135,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][5] = 64,
[1][0][2][0][RTW89_CN][5] = 58,
[1][0][2][0][RTW89_QATAR][5] = 58,
+ [1][0][2][0][RTW89_UK][5] = 58,
[1][0][2][0][RTW89_FCC][6] = 54,
[1][0][2][0][RTW89_ETSI][6] = 58,
[1][0][2][0][RTW89_MKK][6] = 68,
@@ -45015,6 +45147,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][6] = 54,
[1][0][2][0][RTW89_CN][6] = 58,
[1][0][2][0][RTW89_QATAR][6] = 58,
+ [1][0][2][0][RTW89_UK][6] = 58,
[1][0][2][0][RTW89_FCC][7] = 50,
[1][0][2][0][RTW89_ETSI][7] = 58,
[1][0][2][0][RTW89_MKK][7] = 68,
@@ -45026,6 +45159,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][7] = 50,
[1][0][2][0][RTW89_CN][7] = 58,
[1][0][2][0][RTW89_QATAR][7] = 58,
+ [1][0][2][0][RTW89_UK][7] = 58,
[1][0][2][0][RTW89_FCC][8] = 50,
[1][0][2][0][RTW89_ETSI][8] = 58,
[1][0][2][0][RTW89_MKK][8] = 68,
@@ -45037,6 +45171,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][8] = 50,
[1][0][2][0][RTW89_CN][8] = 58,
[1][0][2][0][RTW89_QATAR][8] = 58,
+ [1][0][2][0][RTW89_UK][8] = 58,
[1][0][2][0][RTW89_FCC][9] = 42,
[1][0][2][0][RTW89_ETSI][9] = 58,
[1][0][2][0][RTW89_MKK][9] = 68,
@@ -45048,6 +45183,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][9] = 42,
[1][0][2][0][RTW89_CN][9] = 58,
[1][0][2][0][RTW89_QATAR][9] = 58,
+ [1][0][2][0][RTW89_UK][9] = 58,
[1][0][2][0][RTW89_FCC][10] = 40,
[1][0][2][0][RTW89_ETSI][10] = 58,
[1][0][2][0][RTW89_MKK][10] = 68,
@@ -45059,6 +45195,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][10] = 40,
[1][0][2][0][RTW89_CN][10] = 58,
[1][0][2][0][RTW89_QATAR][10] = 58,
+ [1][0][2][0][RTW89_UK][10] = 58,
[1][0][2][0][RTW89_FCC][11] = 127,
[1][0][2][0][RTW89_ETSI][11] = 127,
[1][0][2][0][RTW89_MKK][11] = 127,
@@ -45070,6 +45207,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][11] = 127,
[1][0][2][0][RTW89_CN][11] = 127,
[1][0][2][0][RTW89_QATAR][11] = 127,
+ [1][0][2][0][RTW89_UK][11] = 127,
[1][0][2][0][RTW89_FCC][12] = 127,
[1][0][2][0][RTW89_ETSI][12] = 127,
[1][0][2][0][RTW89_MKK][12] = 127,
@@ -45081,6 +45219,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][12] = 127,
[1][0][2][0][RTW89_CN][12] = 127,
[1][0][2][0][RTW89_QATAR][12] = 127,
+ [1][0][2][0][RTW89_UK][12] = 127,
[1][0][2][0][RTW89_FCC][13] = 127,
[1][0][2][0][RTW89_ETSI][13] = 127,
[1][0][2][0][RTW89_MKK][13] = 127,
@@ -45092,6 +45231,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][13] = 127,
[1][0][2][0][RTW89_CN][13] = 127,
[1][0][2][0][RTW89_QATAR][13] = 127,
+ [1][0][2][0][RTW89_UK][13] = 127,
[1][1][2][0][RTW89_FCC][0] = 127,
[1][1][2][0][RTW89_ETSI][0] = 127,
[1][1][2][0][RTW89_MKK][0] = 127,
@@ -45103,6 +45243,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][0] = 127,
[1][1][2][0][RTW89_CN][0] = 127,
[1][1][2][0][RTW89_QATAR][0] = 127,
+ [1][1][2][0][RTW89_UK][0] = 127,
[1][1][2][0][RTW89_FCC][1] = 127,
[1][1][2][0][RTW89_ETSI][1] = 127,
[1][1][2][0][RTW89_MKK][1] = 127,
@@ -45114,6 +45255,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][1] = 127,
[1][1][2][0][RTW89_CN][1] = 127,
[1][1][2][0][RTW89_QATAR][1] = 127,
+ [1][1][2][0][RTW89_UK][1] = 127,
[1][1][2][0][RTW89_FCC][2] = 52,
[1][1][2][0][RTW89_ETSI][2] = 46,
[1][1][2][0][RTW89_MKK][2] = 64,
@@ -45125,6 +45267,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][2] = 52,
[1][1][2][0][RTW89_CN][2] = 46,
[1][1][2][0][RTW89_QATAR][2] = 46,
+ [1][1][2][0][RTW89_UK][2] = 46,
[1][1][2][0][RTW89_FCC][3] = 52,
[1][1][2][0][RTW89_ETSI][3] = 46,
[1][1][2][0][RTW89_MKK][3] = 64,
@@ -45136,6 +45279,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][3] = 52,
[1][1][2][0][RTW89_CN][3] = 46,
[1][1][2][0][RTW89_QATAR][3] = 46,
+ [1][1][2][0][RTW89_UK][3] = 46,
[1][1][2][0][RTW89_FCC][4] = 56,
[1][1][2][0][RTW89_ETSI][4] = 46,
[1][1][2][0][RTW89_MKK][4] = 64,
@@ -45147,6 +45291,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][4] = 56,
[1][1][2][0][RTW89_CN][4] = 46,
[1][1][2][0][RTW89_QATAR][4] = 46,
+ [1][1][2][0][RTW89_UK][4] = 46,
[1][1][2][0][RTW89_FCC][5] = 60,
[1][1][2][0][RTW89_ETSI][5] = 46,
[1][1][2][0][RTW89_MKK][5] = 64,
@@ -45158,6 +45303,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][5] = 60,
[1][1][2][0][RTW89_CN][5] = 46,
[1][1][2][0][RTW89_QATAR][5] = 46,
+ [1][1][2][0][RTW89_UK][5] = 46,
[1][1][2][0][RTW89_FCC][6] = 54,
[1][1][2][0][RTW89_ETSI][6] = 46,
[1][1][2][0][RTW89_MKK][6] = 64,
@@ -45169,6 +45315,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][6] = 54,
[1][1][2][0][RTW89_CN][6] = 46,
[1][1][2][0][RTW89_QATAR][6] = 46,
+ [1][1][2][0][RTW89_UK][6] = 46,
[1][1][2][0][RTW89_FCC][7] = 50,
[1][1][2][0][RTW89_ETSI][7] = 46,
[1][1][2][0][RTW89_MKK][7] = 64,
@@ -45180,6 +45327,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][7] = 50,
[1][1][2][0][RTW89_CN][7] = 46,
[1][1][2][0][RTW89_QATAR][7] = 46,
+ [1][1][2][0][RTW89_UK][7] = 46,
[1][1][2][0][RTW89_FCC][8] = 50,
[1][1][2][0][RTW89_ETSI][8] = 46,
[1][1][2][0][RTW89_MKK][8] = 64,
@@ -45191,6 +45339,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][8] = 50,
[1][1][2][0][RTW89_CN][8] = 46,
[1][1][2][0][RTW89_QATAR][8] = 46,
+ [1][1][2][0][RTW89_UK][8] = 46,
[1][1][2][0][RTW89_FCC][9] = 38,
[1][1][2][0][RTW89_ETSI][9] = 46,
[1][1][2][0][RTW89_MKK][9] = 64,
@@ -45202,6 +45351,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][9] = 38,
[1][1][2][0][RTW89_CN][9] = 46,
[1][1][2][0][RTW89_QATAR][9] = 46,
+ [1][1][2][0][RTW89_UK][9] = 46,
[1][1][2][0][RTW89_FCC][10] = 36,
[1][1][2][0][RTW89_ETSI][10] = 46,
[1][1][2][0][RTW89_MKK][10] = 64,
@@ -45213,6 +45363,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][10] = 36,
[1][1][2][0][RTW89_CN][10] = 46,
[1][1][2][0][RTW89_QATAR][10] = 46,
+ [1][1][2][0][RTW89_UK][10] = 46,
[1][1][2][0][RTW89_FCC][11] = 127,
[1][1][2][0][RTW89_ETSI][11] = 127,
[1][1][2][0][RTW89_MKK][11] = 127,
@@ -45224,6 +45375,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][11] = 127,
[1][1][2][0][RTW89_CN][11] = 127,
[1][1][2][0][RTW89_QATAR][11] = 127,
+ [1][1][2][0][RTW89_UK][11] = 127,
[1][1][2][0][RTW89_FCC][12] = 127,
[1][1][2][0][RTW89_ETSI][12] = 127,
[1][1][2][0][RTW89_MKK][12] = 127,
@@ -45235,6 +45387,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][12] = 127,
[1][1][2][0][RTW89_CN][12] = 127,
[1][1][2][0][RTW89_QATAR][12] = 127,
+ [1][1][2][0][RTW89_UK][12] = 127,
[1][1][2][0][RTW89_FCC][13] = 127,
[1][1][2][0][RTW89_ETSI][13] = 127,
[1][1][2][0][RTW89_MKK][13] = 127,
@@ -45246,6 +45399,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][13] = 127,
[1][1][2][0][RTW89_CN][13] = 127,
[1][1][2][0][RTW89_QATAR][13] = 127,
+ [1][1][2][0][RTW89_UK][13] = 127,
[1][1][2][1][RTW89_FCC][0] = 127,
[1][1][2][1][RTW89_ETSI][0] = 127,
[1][1][2][1][RTW89_MKK][0] = 127,
@@ -45257,6 +45411,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][0] = 127,
[1][1][2][1][RTW89_CN][0] = 127,
[1][1][2][1][RTW89_QATAR][0] = 127,
+ [1][1][2][1][RTW89_UK][0] = 127,
[1][1][2][1][RTW89_FCC][1] = 127,
[1][1][2][1][RTW89_ETSI][1] = 127,
[1][1][2][1][RTW89_MKK][1] = 127,
@@ -45268,6 +45423,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][1] = 127,
[1][1][2][1][RTW89_CN][1] = 127,
[1][1][2][1][RTW89_QATAR][1] = 127,
+ [1][1][2][1][RTW89_UK][1] = 127,
[1][1][2][1][RTW89_FCC][2] = 52,
[1][1][2][1][RTW89_ETSI][2] = 34,
[1][1][2][1][RTW89_MKK][2] = 64,
@@ -45279,6 +45435,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][2] = 52,
[1][1][2][1][RTW89_CN][2] = 34,
[1][1][2][1][RTW89_QATAR][2] = 34,
+ [1][1][2][1][RTW89_UK][2] = 34,
[1][1][2][1][RTW89_FCC][3] = 52,
[1][1][2][1][RTW89_ETSI][3] = 34,
[1][1][2][1][RTW89_MKK][3] = 64,
@@ -45290,6 +45447,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][3] = 52,
[1][1][2][1][RTW89_CN][3] = 34,
[1][1][2][1][RTW89_QATAR][3] = 34,
+ [1][1][2][1][RTW89_UK][3] = 34,
[1][1][2][1][RTW89_FCC][4] = 56,
[1][1][2][1][RTW89_ETSI][4] = 34,
[1][1][2][1][RTW89_MKK][4] = 64,
@@ -45301,6 +45459,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][4] = 56,
[1][1][2][1][RTW89_CN][4] = 34,
[1][1][2][1][RTW89_QATAR][4] = 34,
+ [1][1][2][1][RTW89_UK][4] = 34,
[1][1][2][1][RTW89_FCC][5] = 60,
[1][1][2][1][RTW89_ETSI][5] = 34,
[1][1][2][1][RTW89_MKK][5] = 64,
@@ -45312,6 +45471,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][5] = 60,
[1][1][2][1][RTW89_CN][5] = 34,
[1][1][2][1][RTW89_QATAR][5] = 34,
+ [1][1][2][1][RTW89_UK][5] = 34,
[1][1][2][1][RTW89_FCC][6] = 54,
[1][1][2][1][RTW89_ETSI][6] = 34,
[1][1][2][1][RTW89_MKK][6] = 64,
@@ -45323,6 +45483,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][6] = 54,
[1][1][2][1][RTW89_CN][6] = 34,
[1][1][2][1][RTW89_QATAR][6] = 34,
+ [1][1][2][1][RTW89_UK][6] = 34,
[1][1][2][1][RTW89_FCC][7] = 50,
[1][1][2][1][RTW89_ETSI][7] = 34,
[1][1][2][1][RTW89_MKK][7] = 64,
@@ -45334,6 +45495,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][7] = 50,
[1][1][2][1][RTW89_CN][7] = 34,
[1][1][2][1][RTW89_QATAR][7] = 34,
+ [1][1][2][1][RTW89_UK][7] = 34,
[1][1][2][1][RTW89_FCC][8] = 50,
[1][1][2][1][RTW89_ETSI][8] = 34,
[1][1][2][1][RTW89_MKK][8] = 64,
@@ -45345,6 +45507,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][8] = 50,
[1][1][2][1][RTW89_CN][8] = 34,
[1][1][2][1][RTW89_QATAR][8] = 34,
+ [1][1][2][1][RTW89_UK][8] = 34,
[1][1][2][1][RTW89_FCC][9] = 38,
[1][1][2][1][RTW89_ETSI][9] = 34,
[1][1][2][1][RTW89_MKK][9] = 64,
@@ -45356,6 +45519,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][9] = 38,
[1][1][2][1][RTW89_CN][9] = 34,
[1][1][2][1][RTW89_QATAR][9] = 34,
+ [1][1][2][1][RTW89_UK][9] = 34,
[1][1][2][1][RTW89_FCC][10] = 36,
[1][1][2][1][RTW89_ETSI][10] = 34,
[1][1][2][1][RTW89_MKK][10] = 64,
@@ -45367,6 +45531,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][10] = 36,
[1][1][2][1][RTW89_CN][10] = 34,
[1][1][2][1][RTW89_QATAR][10] = 34,
+ [1][1][2][1][RTW89_UK][10] = 34,
[1][1][2][1][RTW89_FCC][11] = 127,
[1][1][2][1][RTW89_ETSI][11] = 127,
[1][1][2][1][RTW89_MKK][11] = 127,
@@ -45378,6 +45543,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][11] = 127,
[1][1][2][1][RTW89_CN][11] = 127,
[1][1][2][1][RTW89_QATAR][11] = 127,
+ [1][1][2][1][RTW89_UK][11] = 127,
[1][1][2][1][RTW89_FCC][12] = 127,
[1][1][2][1][RTW89_ETSI][12] = 127,
[1][1][2][1][RTW89_MKK][12] = 127,
@@ -45389,6 +45555,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][12] = 127,
[1][1][2][1][RTW89_CN][12] = 127,
[1][1][2][1][RTW89_QATAR][12] = 127,
+ [1][1][2][1][RTW89_UK][12] = 127,
[1][1][2][1][RTW89_FCC][13] = 127,
[1][1][2][1][RTW89_ETSI][13] = 127,
[1][1][2][1][RTW89_MKK][13] = 127,
@@ -45400,6 +45567,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][13] = 127,
[1][1][2][1][RTW89_CN][13] = 127,
[1][1][2][1][RTW89_QATAR][13] = 127,
+ [1][1][2][1][RTW89_UK][13] = 127,
};
const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
@@ -45595,6 +45763,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][0] = 62,
[0][0][1][0][RTW89_CN][0] = 58,
[0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 58,
[0][0][1][0][RTW89_FCC][2] = 76,
[0][0][1][0][RTW89_ETSI][2] = 58,
[0][0][1][0][RTW89_MKK][2] = 62,
@@ -45606,6 +45775,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][2] = 62,
[0][0][1][0][RTW89_CN][2] = 58,
[0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 58,
[0][0][1][0][RTW89_FCC][4] = 76,
[0][0][1][0][RTW89_ETSI][4] = 58,
[0][0][1][0][RTW89_MKK][4] = 62,
@@ -45617,6 +45787,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][4] = 62,
[0][0][1][0][RTW89_CN][4] = 58,
[0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 58,
[0][0][1][0][RTW89_FCC][6] = 76,
[0][0][1][0][RTW89_ETSI][6] = 58,
[0][0][1][0][RTW89_MKK][6] = 62,
@@ -45628,6 +45799,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][6] = 62,
[0][0][1][0][RTW89_CN][6] = 58,
[0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 58,
[0][0][1][0][RTW89_FCC][8] = 76,
[0][0][1][0][RTW89_ETSI][8] = 58,
[0][0][1][0][RTW89_MKK][8] = 62,
@@ -45639,6 +45811,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][8] = 76,
[0][0][1][0][RTW89_CN][8] = 58,
[0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 58,
[0][0][1][0][RTW89_FCC][10] = 76,
[0][0][1][0][RTW89_ETSI][10] = 58,
[0][0][1][0][RTW89_MKK][10] = 62,
@@ -45650,6 +45823,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][10] = 76,
[0][0][1][0][RTW89_CN][10] = 58,
[0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 58,
[0][0][1][0][RTW89_FCC][12] = 76,
[0][0][1][0][RTW89_ETSI][12] = 58,
[0][0][1][0][RTW89_MKK][12] = 62,
@@ -45661,6 +45835,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][12] = 76,
[0][0][1][0][RTW89_CN][12] = 58,
[0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 58,
[0][0][1][0][RTW89_FCC][14] = 76,
[0][0][1][0][RTW89_ETSI][14] = 58,
[0][0][1][0][RTW89_MKK][14] = 62,
@@ -45672,6 +45847,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][14] = 76,
[0][0][1][0][RTW89_CN][14] = 58,
[0][0][1][0][RTW89_QATAR][14] = 58,
+ [0][0][1][0][RTW89_UK][14] = 58,
[0][0][1][0][RTW89_FCC][15] = 76,
[0][0][1][0][RTW89_ETSI][15] = 58,
[0][0][1][0][RTW89_MKK][15] = 76,
@@ -45683,6 +45859,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][15] = 76,
[0][0][1][0][RTW89_CN][15] = 127,
[0][0][1][0][RTW89_QATAR][15] = 52,
+ [0][0][1][0][RTW89_UK][15] = 58,
[0][0][1][0][RTW89_FCC][17] = 76,
[0][0][1][0][RTW89_ETSI][17] = 58,
[0][0][1][0][RTW89_MKK][17] = 76,
@@ -45694,6 +45871,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][17] = 76,
[0][0][1][0][RTW89_CN][17] = 127,
[0][0][1][0][RTW89_QATAR][17] = 52,
+ [0][0][1][0][RTW89_UK][17] = 58,
[0][0][1][0][RTW89_FCC][19] = 76,
[0][0][1][0][RTW89_ETSI][19] = 58,
[0][0][1][0][RTW89_MKK][19] = 76,
@@ -45705,6 +45883,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][19] = 76,
[0][0][1][0][RTW89_CN][19] = 127,
[0][0][1][0][RTW89_QATAR][19] = 52,
+ [0][0][1][0][RTW89_UK][19] = 58,
[0][0][1][0][RTW89_FCC][21] = 76,
[0][0][1][0][RTW89_ETSI][21] = 58,
[0][0][1][0][RTW89_MKK][21] = 76,
@@ -45716,6 +45895,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][21] = 76,
[0][0][1][0][RTW89_CN][21] = 127,
[0][0][1][0][RTW89_QATAR][21] = 52,
+ [0][0][1][0][RTW89_UK][21] = 58,
[0][0][1][0][RTW89_FCC][23] = 76,
[0][0][1][0][RTW89_ETSI][23] = 58,
[0][0][1][0][RTW89_MKK][23] = 76,
@@ -45727,6 +45907,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][23] = 76,
[0][0][1][0][RTW89_CN][23] = 127,
[0][0][1][0][RTW89_QATAR][23] = 52,
+ [0][0][1][0][RTW89_UK][23] = 58,
[0][0][1][0][RTW89_FCC][25] = 76,
[0][0][1][0][RTW89_ETSI][25] = 58,
[0][0][1][0][RTW89_MKK][25] = 76,
@@ -45738,6 +45919,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][25] = 76,
[0][0][1][0][RTW89_CN][25] = 127,
[0][0][1][0][RTW89_QATAR][25] = 52,
+ [0][0][1][0][RTW89_UK][25] = 58,
[0][0][1][0][RTW89_FCC][27] = 76,
[0][0][1][0][RTW89_ETSI][27] = 58,
[0][0][1][0][RTW89_MKK][27] = 76,
@@ -45749,6 +45931,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][27] = 76,
[0][0][1][0][RTW89_CN][27] = 127,
[0][0][1][0][RTW89_QATAR][27] = 52,
+ [0][0][1][0][RTW89_UK][27] = 58,
[0][0][1][0][RTW89_FCC][29] = 76,
[0][0][1][0][RTW89_ETSI][29] = 58,
[0][0][1][0][RTW89_MKK][29] = 76,
@@ -45760,6 +45943,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][29] = 76,
[0][0][1][0][RTW89_CN][29] = 127,
[0][0][1][0][RTW89_QATAR][29] = 52,
+ [0][0][1][0][RTW89_UK][29] = 58,
[0][0][1][0][RTW89_FCC][31] = 76,
[0][0][1][0][RTW89_ETSI][31] = 58,
[0][0][1][0][RTW89_MKK][31] = 76,
@@ -45771,6 +45955,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][31] = 76,
[0][0][1][0][RTW89_CN][31] = 127,
[0][0][1][0][RTW89_QATAR][31] = 52,
+ [0][0][1][0][RTW89_UK][31] = 58,
[0][0][1][0][RTW89_FCC][33] = 76,
[0][0][1][0][RTW89_ETSI][33] = 58,
[0][0][1][0][RTW89_MKK][33] = 76,
@@ -45782,6 +45967,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][33] = 76,
[0][0][1][0][RTW89_CN][33] = 127,
[0][0][1][0][RTW89_QATAR][33] = 52,
+ [0][0][1][0][RTW89_UK][33] = 58,
[0][0][1][0][RTW89_FCC][35] = 74,
[0][0][1][0][RTW89_ETSI][35] = 58,
[0][0][1][0][RTW89_MKK][35] = 76,
@@ -45793,6 +45979,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][35] = 74,
[0][0][1][0][RTW89_CN][35] = 127,
[0][0][1][0][RTW89_QATAR][35] = 52,
+ [0][0][1][0][RTW89_UK][35] = 58,
[0][0][1][0][RTW89_FCC][37] = 76,
[0][0][1][0][RTW89_ETSI][37] = 127,
[0][0][1][0][RTW89_MKK][37] = 76,
@@ -45804,6 +45991,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][37] = 76,
[0][0][1][0][RTW89_CN][37] = 127,
[0][0][1][0][RTW89_QATAR][37] = 127,
+ [0][0][1][0][RTW89_UK][37] = 76,
[0][0][1][0][RTW89_FCC][38] = 76,
[0][0][1][0][RTW89_ETSI][38] = 28,
[0][0][1][0][RTW89_MKK][38] = 127,
@@ -45815,6 +46003,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][38] = 76,
[0][0][1][0][RTW89_CN][38] = 72,
[0][0][1][0][RTW89_QATAR][38] = 28,
+ [0][0][1][0][RTW89_UK][38] = 56,
[0][0][1][0][RTW89_FCC][40] = 76,
[0][0][1][0][RTW89_ETSI][40] = 28,
[0][0][1][0][RTW89_MKK][40] = 127,
@@ -45826,6 +46015,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][40] = 76,
[0][0][1][0][RTW89_CN][40] = 76,
[0][0][1][0][RTW89_QATAR][40] = 28,
+ [0][0][1][0][RTW89_UK][40] = 56,
[0][0][1][0][RTW89_FCC][42] = 76,
[0][0][1][0][RTW89_ETSI][42] = 28,
[0][0][1][0][RTW89_MKK][42] = 127,
@@ -45837,6 +46027,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][42] = 76,
[0][0][1][0][RTW89_CN][42] = 76,
[0][0][1][0][RTW89_QATAR][42] = 28,
+ [0][0][1][0][RTW89_UK][42] = 56,
[0][0][1][0][RTW89_FCC][44] = 76,
[0][0][1][0][RTW89_ETSI][44] = 28,
[0][0][1][0][RTW89_MKK][44] = 127,
@@ -45848,6 +46039,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][44] = 76,
[0][0][1][0][RTW89_CN][44] = 76,
[0][0][1][0][RTW89_QATAR][44] = 28,
+ [0][0][1][0][RTW89_UK][44] = 56,
[0][0][1][0][RTW89_FCC][46] = 76,
[0][0][1][0][RTW89_ETSI][46] = 28,
[0][0][1][0][RTW89_MKK][46] = 127,
@@ -45859,6 +46051,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][46] = 76,
[0][0][1][0][RTW89_CN][46] = 76,
[0][0][1][0][RTW89_QATAR][46] = 28,
+ [0][0][1][0][RTW89_UK][46] = 56,
[0][1][1][0][RTW89_FCC][0] = 68,
[0][1][1][0][RTW89_ETSI][0] = 46,
[0][1][1][0][RTW89_MKK][0] = 50,
@@ -45870,6 +46063,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 50,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][2] = 68,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 50,
@@ -45881,6 +46075,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][2] = 50,
[0][1][1][0][RTW89_CN][2] = 46,
[0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 46,
[0][1][1][0][RTW89_FCC][4] = 68,
[0][1][1][0][RTW89_ETSI][4] = 46,
[0][1][1][0][RTW89_MKK][4] = 50,
@@ -45892,6 +46087,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][4] = 50,
[0][1][1][0][RTW89_CN][4] = 46,
[0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 46,
[0][1][1][0][RTW89_FCC][6] = 68,
[0][1][1][0][RTW89_ETSI][6] = 46,
[0][1][1][0][RTW89_MKK][6] = 50,
@@ -45903,6 +46099,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][6] = 50,
[0][1][1][0][RTW89_CN][6] = 46,
[0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 46,
[0][1][1][0][RTW89_FCC][8] = 68,
[0][1][1][0][RTW89_ETSI][8] = 46,
[0][1][1][0][RTW89_MKK][8] = 50,
@@ -45914,6 +46111,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][8] = 68,
[0][1][1][0][RTW89_CN][8] = 46,
[0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 46,
[0][1][1][0][RTW89_FCC][10] = 68,
[0][1][1][0][RTW89_ETSI][10] = 46,
[0][1][1][0][RTW89_MKK][10] = 50,
@@ -45925,6 +46123,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][10] = 68,
[0][1][1][0][RTW89_CN][10] = 46,
[0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 46,
[0][1][1][0][RTW89_FCC][12] = 68,
[0][1][1][0][RTW89_ETSI][12] = 46,
[0][1][1][0][RTW89_MKK][12] = 50,
@@ -45936,6 +46135,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][12] = 68,
[0][1][1][0][RTW89_CN][12] = 46,
[0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 46,
[0][1][1][0][RTW89_FCC][14] = 68,
[0][1][1][0][RTW89_ETSI][14] = 46,
[0][1][1][0][RTW89_MKK][14] = 50,
@@ -45947,6 +46147,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][14] = 68,
[0][1][1][0][RTW89_CN][14] = 46,
[0][1][1][0][RTW89_QATAR][14] = 46,
+ [0][1][1][0][RTW89_UK][14] = 46,
[0][1][1][0][RTW89_FCC][15] = 68,
[0][1][1][0][RTW89_ETSI][15] = 46,
[0][1][1][0][RTW89_MKK][15] = 70,
@@ -45958,6 +46159,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][15] = 68,
[0][1][1][0][RTW89_CN][15] = 127,
[0][1][1][0][RTW89_QATAR][15] = 40,
+ [0][1][1][0][RTW89_UK][15] = 46,
[0][1][1][0][RTW89_FCC][17] = 68,
[0][1][1][0][RTW89_ETSI][17] = 46,
[0][1][1][0][RTW89_MKK][17] = 70,
@@ -45969,6 +46171,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][17] = 68,
[0][1][1][0][RTW89_CN][17] = 127,
[0][1][1][0][RTW89_QATAR][17] = 40,
+ [0][1][1][0][RTW89_UK][17] = 46,
[0][1][1][0][RTW89_FCC][19] = 68,
[0][1][1][0][RTW89_ETSI][19] = 46,
[0][1][1][0][RTW89_MKK][19] = 70,
@@ -45980,6 +46183,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][19] = 68,
[0][1][1][0][RTW89_CN][19] = 127,
[0][1][1][0][RTW89_QATAR][19] = 40,
+ [0][1][1][0][RTW89_UK][19] = 46,
[0][1][1][0][RTW89_FCC][21] = 68,
[0][1][1][0][RTW89_ETSI][21] = 46,
[0][1][1][0][RTW89_MKK][21] = 70,
@@ -45991,6 +46195,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][21] = 68,
[0][1][1][0][RTW89_CN][21] = 127,
[0][1][1][0][RTW89_QATAR][21] = 40,
+ [0][1][1][0][RTW89_UK][21] = 46,
[0][1][1][0][RTW89_FCC][23] = 68,
[0][1][1][0][RTW89_ETSI][23] = 46,
[0][1][1][0][RTW89_MKK][23] = 70,
@@ -46002,6 +46207,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][23] = 68,
[0][1][1][0][RTW89_CN][23] = 127,
[0][1][1][0][RTW89_QATAR][23] = 40,
+ [0][1][1][0][RTW89_UK][23] = 46,
[0][1][1][0][RTW89_FCC][25] = 68,
[0][1][1][0][RTW89_ETSI][25] = 46,
[0][1][1][0][RTW89_MKK][25] = 70,
@@ -46013,6 +46219,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][25] = 68,
[0][1][1][0][RTW89_CN][25] = 127,
[0][1][1][0][RTW89_QATAR][25] = 40,
+ [0][1][1][0][RTW89_UK][25] = 46,
[0][1][1][0][RTW89_FCC][27] = 68,
[0][1][1][0][RTW89_ETSI][27] = 46,
[0][1][1][0][RTW89_MKK][27] = 70,
@@ -46024,6 +46231,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][27] = 68,
[0][1][1][0][RTW89_CN][27] = 127,
[0][1][1][0][RTW89_QATAR][27] = 40,
+ [0][1][1][0][RTW89_UK][27] = 46,
[0][1][1][0][RTW89_FCC][29] = 68,
[0][1][1][0][RTW89_ETSI][29] = 46,
[0][1][1][0][RTW89_MKK][29] = 70,
@@ -46035,6 +46243,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][29] = 68,
[0][1][1][0][RTW89_CN][29] = 127,
[0][1][1][0][RTW89_QATAR][29] = 40,
+ [0][1][1][0][RTW89_UK][29] = 46,
[0][1][1][0][RTW89_FCC][31] = 68,
[0][1][1][0][RTW89_ETSI][31] = 46,
[0][1][1][0][RTW89_MKK][31] = 70,
@@ -46046,6 +46255,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][31] = 68,
[0][1][1][0][RTW89_CN][31] = 127,
[0][1][1][0][RTW89_QATAR][31] = 40,
+ [0][1][1][0][RTW89_UK][31] = 46,
[0][1][1][0][RTW89_FCC][33] = 68,
[0][1][1][0][RTW89_ETSI][33] = 46,
[0][1][1][0][RTW89_MKK][33] = 70,
@@ -46057,6 +46267,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][33] = 68,
[0][1][1][0][RTW89_CN][33] = 127,
[0][1][1][0][RTW89_QATAR][33] = 40,
+ [0][1][1][0][RTW89_UK][33] = 46,
[0][1][1][0][RTW89_FCC][35] = 66,
[0][1][1][0][RTW89_ETSI][35] = 46,
[0][1][1][0][RTW89_MKK][35] = 70,
@@ -46068,6 +46279,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][35] = 66,
[0][1][1][0][RTW89_CN][35] = 127,
[0][1][1][0][RTW89_QATAR][35] = 40,
+ [0][1][1][0][RTW89_UK][35] = 46,
[0][1][1][0][RTW89_FCC][37] = 68,
[0][1][1][0][RTW89_ETSI][37] = 127,
[0][1][1][0][RTW89_MKK][37] = 70,
@@ -46079,6 +46291,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][37] = 68,
[0][1][1][0][RTW89_CN][37] = 127,
[0][1][1][0][RTW89_QATAR][37] = 127,
+ [0][1][1][0][RTW89_UK][37] = 74,
[0][1][1][0][RTW89_FCC][38] = 76,
[0][1][1][0][RTW89_ETSI][38] = 16,
[0][1][1][0][RTW89_MKK][38] = 127,
@@ -46090,6 +46303,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][38] = 76,
[0][1][1][0][RTW89_CN][38] = 72,
[0][1][1][0][RTW89_QATAR][38] = 16,
+ [0][1][1][0][RTW89_UK][38] = 44,
[0][1][1][0][RTW89_FCC][40] = 76,
[0][1][1][0][RTW89_ETSI][40] = 16,
[0][1][1][0][RTW89_MKK][40] = 127,
@@ -46101,6 +46315,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][40] = 76,
[0][1][1][0][RTW89_CN][40] = 76,
[0][1][1][0][RTW89_QATAR][40] = 16,
+ [0][1][1][0][RTW89_UK][40] = 44,
[0][1][1][0][RTW89_FCC][42] = 76,
[0][1][1][0][RTW89_ETSI][42] = 16,
[0][1][1][0][RTW89_MKK][42] = 127,
@@ -46112,6 +46327,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][42] = 76,
[0][1][1][0][RTW89_CN][42] = 76,
[0][1][1][0][RTW89_QATAR][42] = 16,
+ [0][1][1][0][RTW89_UK][42] = 44,
[0][1][1][0][RTW89_FCC][44] = 76,
[0][1][1][0][RTW89_ETSI][44] = 16,
[0][1][1][0][RTW89_MKK][44] = 127,
@@ -46123,6 +46339,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][44] = 76,
[0][1][1][0][RTW89_CN][44] = 76,
[0][1][1][0][RTW89_QATAR][44] = 16,
+ [0][1][1][0][RTW89_UK][44] = 44,
[0][1][1][0][RTW89_FCC][46] = 76,
[0][1][1][0][RTW89_ETSI][46] = 16,
[0][1][1][0][RTW89_MKK][46] = 127,
@@ -46134,6 +46351,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][46] = 76,
[0][1][1][0][RTW89_CN][46] = 76,
[0][1][1][0][RTW89_QATAR][46] = 16,
+ [0][1][1][0][RTW89_UK][46] = 44,
[0][0][2][0][RTW89_FCC][0] = 76,
[0][0][2][0][RTW89_ETSI][0] = 58,
[0][0][2][0][RTW89_MKK][0] = 62,
@@ -46145,6 +46363,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][0] = 62,
[0][0][2][0][RTW89_CN][0] = 58,
[0][0][2][0][RTW89_QATAR][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 58,
[0][0][2][0][RTW89_FCC][2] = 76,
[0][0][2][0][RTW89_ETSI][2] = 58,
[0][0][2][0][RTW89_MKK][2] = 62,
@@ -46156,6 +46375,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][2] = 62,
[0][0][2][0][RTW89_CN][2] = 58,
[0][0][2][0][RTW89_QATAR][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 58,
[0][0][2][0][RTW89_FCC][4] = 76,
[0][0][2][0][RTW89_ETSI][4] = 58,
[0][0][2][0][RTW89_MKK][4] = 62,
@@ -46167,6 +46387,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][4] = 62,
[0][0][2][0][RTW89_CN][4] = 58,
[0][0][2][0][RTW89_QATAR][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 58,
[0][0][2][0][RTW89_FCC][6] = 76,
[0][0][2][0][RTW89_ETSI][6] = 58,
[0][0][2][0][RTW89_MKK][6] = 62,
@@ -46178,6 +46399,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][6] = 62,
[0][0][2][0][RTW89_CN][6] = 58,
[0][0][2][0][RTW89_QATAR][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 58,
[0][0][2][0][RTW89_FCC][8] = 76,
[0][0][2][0][RTW89_ETSI][8] = 58,
[0][0][2][0][RTW89_MKK][8] = 62,
@@ -46189,6 +46411,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][8] = 76,
[0][0][2][0][RTW89_CN][8] = 58,
[0][0][2][0][RTW89_QATAR][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 58,
[0][0][2][0][RTW89_FCC][10] = 76,
[0][0][2][0][RTW89_ETSI][10] = 58,
[0][0][2][0][RTW89_MKK][10] = 62,
@@ -46200,6 +46423,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][10] = 76,
[0][0][2][0][RTW89_CN][10] = 58,
[0][0][2][0][RTW89_QATAR][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 58,
[0][0][2][0][RTW89_FCC][12] = 76,
[0][0][2][0][RTW89_ETSI][12] = 58,
[0][0][2][0][RTW89_MKK][12] = 62,
@@ -46211,6 +46435,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][12] = 76,
[0][0][2][0][RTW89_CN][12] = 58,
[0][0][2][0][RTW89_QATAR][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 58,
[0][0][2][0][RTW89_FCC][14] = 76,
[0][0][2][0][RTW89_ETSI][14] = 58,
[0][0][2][0][RTW89_MKK][14] = 62,
@@ -46222,6 +46447,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][14] = 76,
[0][0][2][0][RTW89_CN][14] = 58,
[0][0][2][0][RTW89_QATAR][14] = 58,
+ [0][0][2][0][RTW89_UK][14] = 58,
[0][0][2][0][RTW89_FCC][15] = 74,
[0][0][2][0][RTW89_ETSI][15] = 58,
[0][0][2][0][RTW89_MKK][15] = 76,
@@ -46233,6 +46459,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][15] = 74,
[0][0][2][0][RTW89_CN][15] = 127,
[0][0][2][0][RTW89_QATAR][15] = 52,
+ [0][0][2][0][RTW89_UK][15] = 58,
[0][0][2][0][RTW89_FCC][17] = 76,
[0][0][2][0][RTW89_ETSI][17] = 58,
[0][0][2][0][RTW89_MKK][17] = 76,
@@ -46244,6 +46471,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][17] = 76,
[0][0][2][0][RTW89_CN][17] = 127,
[0][0][2][0][RTW89_QATAR][17] = 52,
+ [0][0][2][0][RTW89_UK][17] = 58,
[0][0][2][0][RTW89_FCC][19] = 76,
[0][0][2][0][RTW89_ETSI][19] = 58,
[0][0][2][0][RTW89_MKK][19] = 76,
@@ -46255,6 +46483,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][19] = 76,
[0][0][2][0][RTW89_CN][19] = 127,
[0][0][2][0][RTW89_QATAR][19] = 52,
+ [0][0][2][0][RTW89_UK][19] = 58,
[0][0][2][0][RTW89_FCC][21] = 76,
[0][0][2][0][RTW89_ETSI][21] = 58,
[0][0][2][0][RTW89_MKK][21] = 76,
@@ -46266,6 +46495,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][21] = 76,
[0][0][2][0][RTW89_CN][21] = 127,
[0][0][2][0][RTW89_QATAR][21] = 52,
+ [0][0][2][0][RTW89_UK][21] = 58,
[0][0][2][0][RTW89_FCC][23] = 76,
[0][0][2][0][RTW89_ETSI][23] = 58,
[0][0][2][0][RTW89_MKK][23] = 76,
@@ -46277,6 +46507,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][23] = 76,
[0][0][2][0][RTW89_CN][23] = 127,
[0][0][2][0][RTW89_QATAR][23] = 52,
+ [0][0][2][0][RTW89_UK][23] = 58,
[0][0][2][0][RTW89_FCC][25] = 76,
[0][0][2][0][RTW89_ETSI][25] = 58,
[0][0][2][0][RTW89_MKK][25] = 76,
@@ -46288,6 +46519,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][25] = 76,
[0][0][2][0][RTW89_CN][25] = 127,
[0][0][2][0][RTW89_QATAR][25] = 52,
+ [0][0][2][0][RTW89_UK][25] = 58,
[0][0][2][0][RTW89_FCC][27] = 76,
[0][0][2][0][RTW89_ETSI][27] = 58,
[0][0][2][0][RTW89_MKK][27] = 76,
@@ -46299,6 +46531,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][27] = 76,
[0][0][2][0][RTW89_CN][27] = 127,
[0][0][2][0][RTW89_QATAR][27] = 52,
+ [0][0][2][0][RTW89_UK][27] = 58,
[0][0][2][0][RTW89_FCC][29] = 76,
[0][0][2][0][RTW89_ETSI][29] = 58,
[0][0][2][0][RTW89_MKK][29] = 76,
@@ -46310,6 +46543,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][29] = 76,
[0][0][2][0][RTW89_CN][29] = 127,
[0][0][2][0][RTW89_QATAR][29] = 52,
+ [0][0][2][0][RTW89_UK][29] = 58,
[0][0][2][0][RTW89_FCC][31] = 76,
[0][0][2][0][RTW89_ETSI][31] = 58,
[0][0][2][0][RTW89_MKK][31] = 76,
@@ -46321,6 +46555,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][31] = 76,
[0][0][2][0][RTW89_CN][31] = 127,
[0][0][2][0][RTW89_QATAR][31] = 52,
+ [0][0][2][0][RTW89_UK][31] = 58,
[0][0][2][0][RTW89_FCC][33] = 76,
[0][0][2][0][RTW89_ETSI][33] = 58,
[0][0][2][0][RTW89_MKK][33] = 76,
@@ -46332,6 +46567,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][33] = 76,
[0][0][2][0][RTW89_CN][33] = 127,
[0][0][2][0][RTW89_QATAR][33] = 52,
+ [0][0][2][0][RTW89_UK][33] = 58,
[0][0][2][0][RTW89_FCC][35] = 70,
[0][0][2][0][RTW89_ETSI][35] = 58,
[0][0][2][0][RTW89_MKK][35] = 76,
@@ -46343,6 +46579,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][35] = 70,
[0][0][2][0][RTW89_CN][35] = 127,
[0][0][2][0][RTW89_QATAR][35] = 52,
+ [0][0][2][0][RTW89_UK][35] = 58,
[0][0][2][0][RTW89_FCC][37] = 76,
[0][0][2][0][RTW89_ETSI][37] = 127,
[0][0][2][0][RTW89_MKK][37] = 76,
@@ -46354,6 +46591,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][37] = 76,
[0][0][2][0][RTW89_CN][37] = 127,
[0][0][2][0][RTW89_QATAR][37] = 127,
+ [0][0][2][0][RTW89_UK][37] = 76,
[0][0][2][0][RTW89_FCC][38] = 76,
[0][0][2][0][RTW89_ETSI][38] = 28,
[0][0][2][0][RTW89_MKK][38] = 127,
@@ -46365,6 +46603,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][38] = 76,
[0][0][2][0][RTW89_CN][38] = 68,
[0][0][2][0][RTW89_QATAR][38] = 28,
+ [0][0][2][0][RTW89_UK][38] = 58,
[0][0][2][0][RTW89_FCC][40] = 76,
[0][0][2][0][RTW89_ETSI][40] = 28,
[0][0][2][0][RTW89_MKK][40] = 127,
@@ -46376,6 +46615,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][40] = 76,
[0][0][2][0][RTW89_CN][40] = 76,
[0][0][2][0][RTW89_QATAR][40] = 28,
+ [0][0][2][0][RTW89_UK][40] = 58,
[0][0][2][0][RTW89_FCC][42] = 76,
[0][0][2][0][RTW89_ETSI][42] = 28,
[0][0][2][0][RTW89_MKK][42] = 127,
@@ -46387,6 +46627,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][42] = 76,
[0][0][2][0][RTW89_CN][42] = 76,
[0][0][2][0][RTW89_QATAR][42] = 28,
+ [0][0][2][0][RTW89_UK][42] = 58,
[0][0][2][0][RTW89_FCC][44] = 76,
[0][0][2][0][RTW89_ETSI][44] = 28,
[0][0][2][0][RTW89_MKK][44] = 127,
@@ -46398,6 +46639,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][44] = 76,
[0][0][2][0][RTW89_CN][44] = 76,
[0][0][2][0][RTW89_QATAR][44] = 28,
+ [0][0][2][0][RTW89_UK][44] = 58,
[0][0][2][0][RTW89_FCC][46] = 76,
[0][0][2][0][RTW89_ETSI][46] = 28,
[0][0][2][0][RTW89_MKK][46] = 127,
@@ -46409,6 +46651,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][46] = 76,
[0][0][2][0][RTW89_CN][46] = 76,
[0][0][2][0][RTW89_QATAR][46] = 28,
+ [0][0][2][0][RTW89_UK][46] = 58,
[0][1][2][0][RTW89_FCC][0] = 68,
[0][1][2][0][RTW89_ETSI][0] = 46,
[0][1][2][0][RTW89_MKK][0] = 50,
@@ -46420,6 +46663,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][0] = 50,
[0][1][2][0][RTW89_CN][0] = 46,
[0][1][2][0][RTW89_QATAR][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 46,
[0][1][2][0][RTW89_FCC][2] = 68,
[0][1][2][0][RTW89_ETSI][2] = 46,
[0][1][2][0][RTW89_MKK][2] = 50,
@@ -46431,6 +46675,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][2] = 50,
[0][1][2][0][RTW89_CN][2] = 46,
[0][1][2][0][RTW89_QATAR][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 46,
[0][1][2][0][RTW89_FCC][4] = 68,
[0][1][2][0][RTW89_ETSI][4] = 46,
[0][1][2][0][RTW89_MKK][4] = 50,
@@ -46442,6 +46687,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][4] = 50,
[0][1][2][0][RTW89_CN][4] = 46,
[0][1][2][0][RTW89_QATAR][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 46,
[0][1][2][0][RTW89_FCC][6] = 68,
[0][1][2][0][RTW89_ETSI][6] = 46,
[0][1][2][0][RTW89_MKK][6] = 50,
@@ -46453,6 +46699,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][6] = 50,
[0][1][2][0][RTW89_CN][6] = 46,
[0][1][2][0][RTW89_QATAR][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 46,
[0][1][2][0][RTW89_FCC][8] = 68,
[0][1][2][0][RTW89_ETSI][8] = 46,
[0][1][2][0][RTW89_MKK][8] = 50,
@@ -46464,6 +46711,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][8] = 68,
[0][1][2][0][RTW89_CN][8] = 46,
[0][1][2][0][RTW89_QATAR][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 46,
[0][1][2][0][RTW89_FCC][10] = 68,
[0][1][2][0][RTW89_ETSI][10] = 46,
[0][1][2][0][RTW89_MKK][10] = 50,
@@ -46475,6 +46723,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][10] = 68,
[0][1][2][0][RTW89_CN][10] = 46,
[0][1][2][0][RTW89_QATAR][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 46,
[0][1][2][0][RTW89_FCC][12] = 68,
[0][1][2][0][RTW89_ETSI][12] = 46,
[0][1][2][0][RTW89_MKK][12] = 50,
@@ -46486,6 +46735,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][12] = 68,
[0][1][2][0][RTW89_CN][12] = 46,
[0][1][2][0][RTW89_QATAR][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 46,
[0][1][2][0][RTW89_FCC][14] = 68,
[0][1][2][0][RTW89_ETSI][14] = 46,
[0][1][2][0][RTW89_MKK][14] = 50,
@@ -46497,6 +46747,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][14] = 68,
[0][1][2][0][RTW89_CN][14] = 46,
[0][1][2][0][RTW89_QATAR][14] = 46,
+ [0][1][2][0][RTW89_UK][14] = 46,
[0][1][2][0][RTW89_FCC][15] = 68,
[0][1][2][0][RTW89_ETSI][15] = 46,
[0][1][2][0][RTW89_MKK][15] = 70,
@@ -46508,6 +46759,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][15] = 68,
[0][1][2][0][RTW89_CN][15] = 127,
[0][1][2][0][RTW89_QATAR][15] = 40,
+ [0][1][2][0][RTW89_UK][15] = 46,
[0][1][2][0][RTW89_FCC][17] = 68,
[0][1][2][0][RTW89_ETSI][17] = 46,
[0][1][2][0][RTW89_MKK][17] = 70,
@@ -46519,6 +46771,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][17] = 68,
[0][1][2][0][RTW89_CN][17] = 127,
[0][1][2][0][RTW89_QATAR][17] = 40,
+ [0][1][2][0][RTW89_UK][17] = 46,
[0][1][2][0][RTW89_FCC][19] = 68,
[0][1][2][0][RTW89_ETSI][19] = 46,
[0][1][2][0][RTW89_MKK][19] = 70,
@@ -46530,6 +46783,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][19] = 68,
[0][1][2][0][RTW89_CN][19] = 127,
[0][1][2][0][RTW89_QATAR][19] = 40,
+ [0][1][2][0][RTW89_UK][19] = 46,
[0][1][2][0][RTW89_FCC][21] = 68,
[0][1][2][0][RTW89_ETSI][21] = 46,
[0][1][2][0][RTW89_MKK][21] = 70,
@@ -46541,6 +46795,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][21] = 68,
[0][1][2][0][RTW89_CN][21] = 127,
[0][1][2][0][RTW89_QATAR][21] = 40,
+ [0][1][2][0][RTW89_UK][21] = 46,
[0][1][2][0][RTW89_FCC][23] = 68,
[0][1][2][0][RTW89_ETSI][23] = 46,
[0][1][2][0][RTW89_MKK][23] = 70,
@@ -46552,6 +46807,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][23] = 68,
[0][1][2][0][RTW89_CN][23] = 127,
[0][1][2][0][RTW89_QATAR][23] = 40,
+ [0][1][2][0][RTW89_UK][23] = 46,
[0][1][2][0][RTW89_FCC][25] = 68,
[0][1][2][0][RTW89_ETSI][25] = 46,
[0][1][2][0][RTW89_MKK][25] = 70,
@@ -46563,6 +46819,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][25] = 68,
[0][1][2][0][RTW89_CN][25] = 127,
[0][1][2][0][RTW89_QATAR][25] = 40,
+ [0][1][2][0][RTW89_UK][25] = 46,
[0][1][2][0][RTW89_FCC][27] = 68,
[0][1][2][0][RTW89_ETSI][27] = 46,
[0][1][2][0][RTW89_MKK][27] = 70,
@@ -46574,6 +46831,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][27] = 68,
[0][1][2][0][RTW89_CN][27] = 127,
[0][1][2][0][RTW89_QATAR][27] = 40,
+ [0][1][2][0][RTW89_UK][27] = 46,
[0][1][2][0][RTW89_FCC][29] = 68,
[0][1][2][0][RTW89_ETSI][29] = 46,
[0][1][2][0][RTW89_MKK][29] = 70,
@@ -46585,6 +46843,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][29] = 68,
[0][1][2][0][RTW89_CN][29] = 127,
[0][1][2][0][RTW89_QATAR][29] = 40,
+ [0][1][2][0][RTW89_UK][29] = 46,
[0][1][2][0][RTW89_FCC][31] = 68,
[0][1][2][0][RTW89_ETSI][31] = 46,
[0][1][2][0][RTW89_MKK][31] = 70,
@@ -46596,6 +46855,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][31] = 68,
[0][1][2][0][RTW89_CN][31] = 127,
[0][1][2][0][RTW89_QATAR][31] = 40,
+ [0][1][2][0][RTW89_UK][31] = 46,
[0][1][2][0][RTW89_FCC][33] = 68,
[0][1][2][0][RTW89_ETSI][33] = 46,
[0][1][2][0][RTW89_MKK][33] = 70,
@@ -46607,6 +46867,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][33] = 68,
[0][1][2][0][RTW89_CN][33] = 127,
[0][1][2][0][RTW89_QATAR][33] = 40,
+ [0][1][2][0][RTW89_UK][33] = 46,
[0][1][2][0][RTW89_FCC][35] = 64,
[0][1][2][0][RTW89_ETSI][35] = 46,
[0][1][2][0][RTW89_MKK][35] = 70,
@@ -46618,6 +46879,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][35] = 64,
[0][1][2][0][RTW89_CN][35] = 127,
[0][1][2][0][RTW89_QATAR][35] = 40,
+ [0][1][2][0][RTW89_UK][35] = 46,
[0][1][2][0][RTW89_FCC][37] = 68,
[0][1][2][0][RTW89_ETSI][37] = 127,
[0][1][2][0][RTW89_MKK][37] = 70,
@@ -46629,6 +46891,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][37] = 68,
[0][1][2][0][RTW89_CN][37] = 127,
[0][1][2][0][RTW89_QATAR][37] = 127,
+ [0][1][2][0][RTW89_UK][37] = 74,
[0][1][2][0][RTW89_FCC][38] = 76,
[0][1][2][0][RTW89_ETSI][38] = 16,
[0][1][2][0][RTW89_MKK][38] = 127,
@@ -46640,6 +46903,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][38] = 76,
[0][1][2][0][RTW89_CN][38] = 68,
[0][1][2][0][RTW89_QATAR][38] = 16,
+ [0][1][2][0][RTW89_UK][38] = 46,
[0][1][2][0][RTW89_FCC][40] = 76,
[0][1][2][0][RTW89_ETSI][40] = 16,
[0][1][2][0][RTW89_MKK][40] = 127,
@@ -46651,6 +46915,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][40] = 76,
[0][1][2][0][RTW89_CN][40] = 76,
[0][1][2][0][RTW89_QATAR][40] = 16,
+ [0][1][2][0][RTW89_UK][40] = 46,
[0][1][2][0][RTW89_FCC][42] = 76,
[0][1][2][0][RTW89_ETSI][42] = 16,
[0][1][2][0][RTW89_MKK][42] = 127,
@@ -46662,6 +46927,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][42] = 76,
[0][1][2][0][RTW89_CN][42] = 76,
[0][1][2][0][RTW89_QATAR][42] = 16,
+ [0][1][2][0][RTW89_UK][42] = 46,
[0][1][2][0][RTW89_FCC][44] = 76,
[0][1][2][0][RTW89_ETSI][44] = 16,
[0][1][2][0][RTW89_MKK][44] = 127,
@@ -46673,6 +46939,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][44] = 76,
[0][1][2][0][RTW89_CN][44] = 76,
[0][1][2][0][RTW89_QATAR][44] = 16,
+ [0][1][2][0][RTW89_UK][44] = 46,
[0][1][2][0][RTW89_FCC][46] = 76,
[0][1][2][0][RTW89_ETSI][46] = 16,
[0][1][2][0][RTW89_MKK][46] = 127,
@@ -46684,6 +46951,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][46] = 76,
[0][1][2][0][RTW89_CN][46] = 76,
[0][1][2][0][RTW89_QATAR][46] = 16,
+ [0][1][2][0][RTW89_UK][46] = 46,
[0][1][2][1][RTW89_FCC][0] = 68,
[0][1][2][1][RTW89_ETSI][0] = 34,
[0][1][2][1][RTW89_MKK][0] = 50,
@@ -46695,6 +46963,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][0] = 50,
[0][1][2][1][RTW89_CN][0] = 34,
[0][1][2][1][RTW89_QATAR][0] = 34,
+ [0][1][2][1][RTW89_UK][0] = 34,
[0][1][2][1][RTW89_FCC][2] = 68,
[0][1][2][1][RTW89_ETSI][2] = 34,
[0][1][2][1][RTW89_MKK][2] = 50,
@@ -46706,6 +46975,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][2] = 50,
[0][1][2][1][RTW89_CN][2] = 34,
[0][1][2][1][RTW89_QATAR][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 34,
[0][1][2][1][RTW89_FCC][4] = 68,
[0][1][2][1][RTW89_ETSI][4] = 34,
[0][1][2][1][RTW89_MKK][4] = 50,
@@ -46717,6 +46987,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][4] = 50,
[0][1][2][1][RTW89_CN][4] = 34,
[0][1][2][1][RTW89_QATAR][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 34,
[0][1][2][1][RTW89_FCC][6] = 68,
[0][1][2][1][RTW89_ETSI][6] = 34,
[0][1][2][1][RTW89_MKK][6] = 50,
@@ -46728,6 +46999,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][6] = 50,
[0][1][2][1][RTW89_CN][6] = 34,
[0][1][2][1][RTW89_QATAR][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 34,
[0][1][2][1][RTW89_FCC][8] = 68,
[0][1][2][1][RTW89_ETSI][8] = 34,
[0][1][2][1][RTW89_MKK][8] = 50,
@@ -46739,6 +47011,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][8] = 68,
[0][1][2][1][RTW89_CN][8] = 34,
[0][1][2][1][RTW89_QATAR][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 34,
[0][1][2][1][RTW89_FCC][10] = 68,
[0][1][2][1][RTW89_ETSI][10] = 34,
[0][1][2][1][RTW89_MKK][10] = 50,
@@ -46750,6 +47023,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][10] = 68,
[0][1][2][1][RTW89_CN][10] = 34,
[0][1][2][1][RTW89_QATAR][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 34,
[0][1][2][1][RTW89_FCC][12] = 68,
[0][1][2][1][RTW89_ETSI][12] = 34,
[0][1][2][1][RTW89_MKK][12] = 50,
@@ -46761,6 +47035,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][12] = 68,
[0][1][2][1][RTW89_CN][12] = 34,
[0][1][2][1][RTW89_QATAR][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 34,
[0][1][2][1][RTW89_FCC][14] = 68,
[0][1][2][1][RTW89_ETSI][14] = 34,
[0][1][2][1][RTW89_MKK][14] = 50,
@@ -46772,6 +47047,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][14] = 68,
[0][1][2][1][RTW89_CN][14] = 34,
[0][1][2][1][RTW89_QATAR][14] = 34,
+ [0][1][2][1][RTW89_UK][14] = 34,
[0][1][2][1][RTW89_FCC][15] = 68,
[0][1][2][1][RTW89_ETSI][15] = 34,
[0][1][2][1][RTW89_MKK][15] = 70,
@@ -46783,6 +47059,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][15] = 68,
[0][1][2][1][RTW89_CN][15] = 127,
[0][1][2][1][RTW89_QATAR][15] = 28,
+ [0][1][2][1][RTW89_UK][15] = 34,
[0][1][2][1][RTW89_FCC][17] = 68,
[0][1][2][1][RTW89_ETSI][17] = 34,
[0][1][2][1][RTW89_MKK][17] = 70,
@@ -46794,6 +47071,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][17] = 68,
[0][1][2][1][RTW89_CN][17] = 127,
[0][1][2][1][RTW89_QATAR][17] = 28,
+ [0][1][2][1][RTW89_UK][17] = 34,
[0][1][2][1][RTW89_FCC][19] = 68,
[0][1][2][1][RTW89_ETSI][19] = 34,
[0][1][2][1][RTW89_MKK][19] = 70,
@@ -46805,6 +47083,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][19] = 68,
[0][1][2][1][RTW89_CN][19] = 127,
[0][1][2][1][RTW89_QATAR][19] = 28,
+ [0][1][2][1][RTW89_UK][19] = 34,
[0][1][2][1][RTW89_FCC][21] = 68,
[0][1][2][1][RTW89_ETSI][21] = 34,
[0][1][2][1][RTW89_MKK][21] = 70,
@@ -46816,6 +47095,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][21] = 68,
[0][1][2][1][RTW89_CN][21] = 127,
[0][1][2][1][RTW89_QATAR][21] = 28,
+ [0][1][2][1][RTW89_UK][21] = 34,
[0][1][2][1][RTW89_FCC][23] = 68,
[0][1][2][1][RTW89_ETSI][23] = 34,
[0][1][2][1][RTW89_MKK][23] = 70,
@@ -46827,6 +47107,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][23] = 68,
[0][1][2][1][RTW89_CN][23] = 127,
[0][1][2][1][RTW89_QATAR][23] = 28,
+ [0][1][2][1][RTW89_UK][23] = 34,
[0][1][2][1][RTW89_FCC][25] = 68,
[0][1][2][1][RTW89_ETSI][25] = 34,
[0][1][2][1][RTW89_MKK][25] = 70,
@@ -46838,6 +47119,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][25] = 68,
[0][1][2][1][RTW89_CN][25] = 127,
[0][1][2][1][RTW89_QATAR][25] = 28,
+ [0][1][2][1][RTW89_UK][25] = 34,
[0][1][2][1][RTW89_FCC][27] = 68,
[0][1][2][1][RTW89_ETSI][27] = 34,
[0][1][2][1][RTW89_MKK][27] = 70,
@@ -46849,6 +47131,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][27] = 68,
[0][1][2][1][RTW89_CN][27] = 127,
[0][1][2][1][RTW89_QATAR][27] = 28,
+ [0][1][2][1][RTW89_UK][27] = 34,
[0][1][2][1][RTW89_FCC][29] = 68,
[0][1][2][1][RTW89_ETSI][29] = 34,
[0][1][2][1][RTW89_MKK][29] = 70,
@@ -46860,6 +47143,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][29] = 68,
[0][1][2][1][RTW89_CN][29] = 127,
[0][1][2][1][RTW89_QATAR][29] = 28,
+ [0][1][2][1][RTW89_UK][29] = 34,
[0][1][2][1][RTW89_FCC][31] = 68,
[0][1][2][1][RTW89_ETSI][31] = 34,
[0][1][2][1][RTW89_MKK][31] = 70,
@@ -46871,6 +47155,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][31] = 68,
[0][1][2][1][RTW89_CN][31] = 127,
[0][1][2][1][RTW89_QATAR][31] = 28,
+ [0][1][2][1][RTW89_UK][31] = 34,
[0][1][2][1][RTW89_FCC][33] = 68,
[0][1][2][1][RTW89_ETSI][33] = 34,
[0][1][2][1][RTW89_MKK][33] = 70,
@@ -46882,6 +47167,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][33] = 68,
[0][1][2][1][RTW89_CN][33] = 127,
[0][1][2][1][RTW89_QATAR][33] = 28,
+ [0][1][2][1][RTW89_UK][33] = 34,
[0][1][2][1][RTW89_FCC][35] = 64,
[0][1][2][1][RTW89_ETSI][35] = 34,
[0][1][2][1][RTW89_MKK][35] = 70,
@@ -46893,6 +47179,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][35] = 64,
[0][1][2][1][RTW89_CN][35] = 127,
[0][1][2][1][RTW89_QATAR][35] = 28,
+ [0][1][2][1][RTW89_UK][35] = 34,
[0][1][2][1][RTW89_FCC][37] = 68,
[0][1][2][1][RTW89_ETSI][37] = 127,
[0][1][2][1][RTW89_MKK][37] = 70,
@@ -46904,6 +47191,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][37] = 68,
[0][1][2][1][RTW89_CN][37] = 127,
[0][1][2][1][RTW89_QATAR][37] = 127,
+ [0][1][2][1][RTW89_UK][37] = 62,
[0][1][2][1][RTW89_FCC][38] = 76,
[0][1][2][1][RTW89_ETSI][38] = 4,
[0][1][2][1][RTW89_MKK][38] = 127,
@@ -46915,6 +47203,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][38] = 76,
[0][1][2][1][RTW89_CN][38] = 68,
[0][1][2][1][RTW89_QATAR][38] = 4,
+ [0][1][2][1][RTW89_UK][38] = 34,
[0][1][2][1][RTW89_FCC][40] = 76,
[0][1][2][1][RTW89_ETSI][40] = 4,
[0][1][2][1][RTW89_MKK][40] = 127,
@@ -46926,6 +47215,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][40] = 76,
[0][1][2][1][RTW89_CN][40] = 70,
[0][1][2][1][RTW89_QATAR][40] = 4,
+ [0][1][2][1][RTW89_UK][40] = 34,
[0][1][2][1][RTW89_FCC][42] = 76,
[0][1][2][1][RTW89_ETSI][42] = 4,
[0][1][2][1][RTW89_MKK][42] = 127,
@@ -46937,6 +47227,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][42] = 76,
[0][1][2][1][RTW89_CN][42] = 70,
[0][1][2][1][RTW89_QATAR][42] = 4,
+ [0][1][2][1][RTW89_UK][42] = 34,
[0][1][2][1][RTW89_FCC][44] = 76,
[0][1][2][1][RTW89_ETSI][44] = 4,
[0][1][2][1][RTW89_MKK][44] = 127,
@@ -46948,6 +47239,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][44] = 76,
[0][1][2][1][RTW89_CN][44] = 70,
[0][1][2][1][RTW89_QATAR][44] = 4,
+ [0][1][2][1][RTW89_UK][44] = 34,
[0][1][2][1][RTW89_FCC][46] = 76,
[0][1][2][1][RTW89_ETSI][46] = 4,
[0][1][2][1][RTW89_MKK][46] = 127,
@@ -46959,6 +47251,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][46] = 76,
[0][1][2][1][RTW89_CN][46] = 70,
[0][1][2][1][RTW89_QATAR][46] = 4,
+ [0][1][2][1][RTW89_UK][46] = 34,
[1][0][2][0][RTW89_FCC][1] = 68,
[1][0][2][0][RTW89_ETSI][1] = 64,
[1][0][2][0][RTW89_MKK][1] = 62,
@@ -46970,6 +47263,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][1] = 62,
[1][0][2][0][RTW89_CN][1] = 64,
[1][0][2][0][RTW89_QATAR][1] = 64,
+ [1][0][2][0][RTW89_UK][1] = 64,
[1][0][2][0][RTW89_FCC][5] = 72,
[1][0][2][0][RTW89_ETSI][5] = 64,
[1][0][2][0][RTW89_MKK][5] = 62,
@@ -46981,6 +47275,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][5] = 62,
[1][0][2][0][RTW89_CN][5] = 64,
[1][0][2][0][RTW89_QATAR][5] = 64,
+ [1][0][2][0][RTW89_UK][5] = 64,
[1][0][2][0][RTW89_FCC][9] = 72,
[1][0][2][0][RTW89_ETSI][9] = 64,
[1][0][2][0][RTW89_MKK][9] = 62,
@@ -46992,6 +47287,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][9] = 72,
[1][0][2][0][RTW89_CN][9] = 64,
[1][0][2][0][RTW89_QATAR][9] = 64,
+ [1][0][2][0][RTW89_UK][9] = 64,
[1][0][2][0][RTW89_FCC][13] = 66,
[1][0][2][0][RTW89_ETSI][13] = 64,
[1][0][2][0][RTW89_MKK][13] = 62,
@@ -47003,6 +47299,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][13] = 66,
[1][0][2][0][RTW89_CN][13] = 64,
[1][0][2][0][RTW89_QATAR][13] = 64,
+ [1][0][2][0][RTW89_UK][13] = 64,
[1][0][2][0][RTW89_FCC][16] = 62,
[1][0][2][0][RTW89_ETSI][16] = 64,
[1][0][2][0][RTW89_MKK][16] = 72,
@@ -47014,6 +47311,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][16] = 62,
[1][0][2][0][RTW89_CN][16] = 127,
[1][0][2][0][RTW89_QATAR][16] = 52,
+ [1][0][2][0][RTW89_UK][16] = 64,
[1][0][2][0][RTW89_FCC][20] = 72,
[1][0][2][0][RTW89_ETSI][20] = 64,
[1][0][2][0][RTW89_MKK][20] = 72,
@@ -47025,6 +47323,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][20] = 72,
[1][0][2][0][RTW89_CN][20] = 127,
[1][0][2][0][RTW89_QATAR][20] = 52,
+ [1][0][2][0][RTW89_UK][20] = 64,
[1][0][2][0][RTW89_FCC][24] = 72,
[1][0][2][0][RTW89_ETSI][24] = 64,
[1][0][2][0][RTW89_MKK][24] = 72,
@@ -47036,6 +47335,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][24] = 72,
[1][0][2][0][RTW89_CN][24] = 127,
[1][0][2][0][RTW89_QATAR][24] = 52,
+ [1][0][2][0][RTW89_UK][24] = 64,
[1][0][2][0][RTW89_FCC][28] = 72,
[1][0][2][0][RTW89_ETSI][28] = 64,
[1][0][2][0][RTW89_MKK][28] = 72,
@@ -47047,6 +47347,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][28] = 72,
[1][0][2][0][RTW89_CN][28] = 127,
[1][0][2][0][RTW89_QATAR][28] = 52,
+ [1][0][2][0][RTW89_UK][28] = 64,
[1][0][2][0][RTW89_FCC][32] = 72,
[1][0][2][0][RTW89_ETSI][32] = 64,
[1][0][2][0][RTW89_MKK][32] = 72,
@@ -47058,6 +47359,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][32] = 72,
[1][0][2][0][RTW89_CN][32] = 127,
[1][0][2][0][RTW89_QATAR][32] = 52,
+ [1][0][2][0][RTW89_UK][32] = 64,
[1][0][2][0][RTW89_FCC][36] = 72,
[1][0][2][0][RTW89_ETSI][36] = 127,
[1][0][2][0][RTW89_MKK][36] = 72,
@@ -47069,6 +47371,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][36] = 72,
[1][0][2][0][RTW89_CN][36] = 127,
[1][0][2][0][RTW89_QATAR][36] = 127,
+ [1][0][2][0][RTW89_UK][36] = 72,
[1][0][2][0][RTW89_FCC][39] = 72,
[1][0][2][0][RTW89_ETSI][39] = 28,
[1][0][2][0][RTW89_MKK][39] = 127,
@@ -47080,6 +47383,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][39] = 72,
[1][0][2][0][RTW89_CN][39] = 68,
[1][0][2][0][RTW89_QATAR][39] = 28,
+ [1][0][2][0][RTW89_UK][39] = 64,
[1][0][2][0][RTW89_FCC][43] = 72,
[1][0][2][0][RTW89_ETSI][43] = 28,
[1][0][2][0][RTW89_MKK][43] = 127,
@@ -47091,6 +47395,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][43] = 72,
[1][0][2][0][RTW89_CN][43] = 72,
[1][0][2][0][RTW89_QATAR][43] = 28,
+ [1][0][2][0][RTW89_UK][43] = 64,
[1][1][2][0][RTW89_FCC][1] = 58,
[1][1][2][0][RTW89_ETSI][1] = 52,
[1][1][2][0][RTW89_MKK][1] = 50,
@@ -47102,6 +47407,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][1] = 50,
[1][1][2][0][RTW89_CN][1] = 52,
[1][1][2][0][RTW89_QATAR][1] = 52,
+ [1][1][2][0][RTW89_UK][1] = 52,
[1][1][2][0][RTW89_FCC][5] = 72,
[1][1][2][0][RTW89_ETSI][5] = 52,
[1][1][2][0][RTW89_MKK][5] = 50,
@@ -47113,6 +47419,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][5] = 50,
[1][1][2][0][RTW89_CN][5] = 52,
[1][1][2][0][RTW89_QATAR][5] = 52,
+ [1][1][2][0][RTW89_UK][5] = 52,
[1][1][2][0][RTW89_FCC][9] = 72,
[1][1][2][0][RTW89_ETSI][9] = 52,
[1][1][2][0][RTW89_MKK][9] = 50,
@@ -47124,6 +47431,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][9] = 72,
[1][1][2][0][RTW89_CN][9] = 52,
[1][1][2][0][RTW89_QATAR][9] = 52,
+ [1][1][2][0][RTW89_UK][9] = 52,
[1][1][2][0][RTW89_FCC][13] = 58,
[1][1][2][0][RTW89_ETSI][13] = 52,
[1][1][2][0][RTW89_MKK][13] = 50,
@@ -47135,6 +47443,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][13] = 58,
[1][1][2][0][RTW89_CN][13] = 52,
[1][1][2][0][RTW89_QATAR][13] = 52,
+ [1][1][2][0][RTW89_UK][13] = 52,
[1][1][2][0][RTW89_FCC][16] = 56,
[1][1][2][0][RTW89_ETSI][16] = 52,
[1][1][2][0][RTW89_MKK][16] = 72,
@@ -47146,6 +47455,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][16] = 56,
[1][1][2][0][RTW89_CN][16] = 127,
[1][1][2][0][RTW89_QATAR][16] = 40,
+ [1][1][2][0][RTW89_UK][16] = 52,
[1][1][2][0][RTW89_FCC][20] = 72,
[1][1][2][0][RTW89_ETSI][20] = 52,
[1][1][2][0][RTW89_MKK][20] = 72,
@@ -47157,6 +47467,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][20] = 72,
[1][1][2][0][RTW89_CN][20] = 127,
[1][1][2][0][RTW89_QATAR][20] = 40,
+ [1][1][2][0][RTW89_UK][20] = 52,
[1][1][2][0][RTW89_FCC][24] = 72,
[1][1][2][0][RTW89_ETSI][24] = 52,
[1][1][2][0][RTW89_MKK][24] = 72,
@@ -47168,6 +47479,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][24] = 72,
[1][1][2][0][RTW89_CN][24] = 127,
[1][1][2][0][RTW89_QATAR][24] = 40,
+ [1][1][2][0][RTW89_UK][24] = 52,
[1][1][2][0][RTW89_FCC][28] = 72,
[1][1][2][0][RTW89_ETSI][28] = 52,
[1][1][2][0][RTW89_MKK][28] = 72,
@@ -47179,6 +47491,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][28] = 72,
[1][1][2][0][RTW89_CN][28] = 127,
[1][1][2][0][RTW89_QATAR][28] = 40,
+ [1][1][2][0][RTW89_UK][28] = 52,
[1][1][2][0][RTW89_FCC][32] = 68,
[1][1][2][0][RTW89_ETSI][32] = 52,
[1][1][2][0][RTW89_MKK][32] = 72,
@@ -47190,6 +47503,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][32] = 68,
[1][1][2][0][RTW89_CN][32] = 127,
[1][1][2][0][RTW89_QATAR][32] = 40,
+ [1][1][2][0][RTW89_UK][32] = 52,
[1][1][2][0][RTW89_FCC][36] = 72,
[1][1][2][0][RTW89_ETSI][36] = 127,
[1][1][2][0][RTW89_MKK][36] = 72,
@@ -47201,6 +47515,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][36] = 72,
[1][1][2][0][RTW89_CN][36] = 127,
[1][1][2][0][RTW89_QATAR][36] = 127,
+ [1][1][2][0][RTW89_UK][36] = 72,
[1][1][2][0][RTW89_FCC][39] = 72,
[1][1][2][0][RTW89_ETSI][39] = 16,
[1][1][2][0][RTW89_MKK][39] = 127,
@@ -47212,6 +47527,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][39] = 72,
[1][1][2][0][RTW89_CN][39] = 68,
[1][1][2][0][RTW89_QATAR][39] = 16,
+ [1][1][2][0][RTW89_UK][39] = 52,
[1][1][2][0][RTW89_FCC][43] = 72,
[1][1][2][0][RTW89_ETSI][43] = 16,
[1][1][2][0][RTW89_MKK][43] = 127,
@@ -47223,6 +47539,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][43] = 72,
[1][1][2][0][RTW89_CN][43] = 72,
[1][1][2][0][RTW89_QATAR][43] = 16,
+ [1][1][2][0][RTW89_UK][43] = 52,
[1][1][2][1][RTW89_FCC][1] = 58,
[1][1][2][1][RTW89_ETSI][1] = 40,
[1][1][2][1][RTW89_MKK][1] = 50,
@@ -47234,6 +47551,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][1] = 50,
[1][1][2][1][RTW89_CN][1] = 40,
[1][1][2][1][RTW89_QATAR][1] = 40,
+ [1][1][2][1][RTW89_UK][1] = 40,
[1][1][2][1][RTW89_FCC][5] = 68,
[1][1][2][1][RTW89_ETSI][5] = 40,
[1][1][2][1][RTW89_MKK][5] = 50,
@@ -47245,6 +47563,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][5] = 50,
[1][1][2][1][RTW89_CN][5] = 40,
[1][1][2][1][RTW89_QATAR][5] = 40,
+ [1][1][2][1][RTW89_UK][5] = 40,
[1][1][2][1][RTW89_FCC][9] = 68,
[1][1][2][1][RTW89_ETSI][9] = 40,
[1][1][2][1][RTW89_MKK][9] = 50,
@@ -47256,6 +47575,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][9] = 68,
[1][1][2][1][RTW89_CN][9] = 40,
[1][1][2][1][RTW89_QATAR][9] = 40,
+ [1][1][2][1][RTW89_UK][9] = 40,
[1][1][2][1][RTW89_FCC][13] = 58,
[1][1][2][1][RTW89_ETSI][13] = 40,
[1][1][2][1][RTW89_MKK][13] = 50,
@@ -47267,6 +47587,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][13] = 58,
[1][1][2][1][RTW89_CN][13] = 40,
[1][1][2][1][RTW89_QATAR][13] = 40,
+ [1][1][2][1][RTW89_UK][13] = 40,
[1][1][2][1][RTW89_FCC][16] = 56,
[1][1][2][1][RTW89_ETSI][16] = 40,
[1][1][2][1][RTW89_MKK][16] = 72,
@@ -47278,6 +47599,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][16] = 56,
[1][1][2][1][RTW89_CN][16] = 127,
[1][1][2][1][RTW89_QATAR][16] = 28,
+ [1][1][2][1][RTW89_UK][16] = 40,
[1][1][2][1][RTW89_FCC][20] = 68,
[1][1][2][1][RTW89_ETSI][20] = 40,
[1][1][2][1][RTW89_MKK][20] = 72,
@@ -47289,6 +47611,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][20] = 68,
[1][1][2][1][RTW89_CN][20] = 127,
[1][1][2][1][RTW89_QATAR][20] = 28,
+ [1][1][2][1][RTW89_UK][20] = 40,
[1][1][2][1][RTW89_FCC][24] = 68,
[1][1][2][1][RTW89_ETSI][24] = 40,
[1][1][2][1][RTW89_MKK][24] = 72,
@@ -47300,6 +47623,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][24] = 68,
[1][1][2][1][RTW89_CN][24] = 127,
[1][1][2][1][RTW89_QATAR][24] = 28,
+ [1][1][2][1][RTW89_UK][24] = 40,
[1][1][2][1][RTW89_FCC][28] = 68,
[1][1][2][1][RTW89_ETSI][28] = 40,
[1][1][2][1][RTW89_MKK][28] = 72,
@@ -47311,6 +47635,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][28] = 68,
[1][1][2][1][RTW89_CN][28] = 127,
[1][1][2][1][RTW89_QATAR][28] = 28,
+ [1][1][2][1][RTW89_UK][28] = 40,
[1][1][2][1][RTW89_FCC][32] = 68,
[1][1][2][1][RTW89_ETSI][32] = 40,
[1][1][2][1][RTW89_MKK][32] = 72,
@@ -47322,6 +47647,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][32] = 68,
[1][1][2][1][RTW89_CN][32] = 127,
[1][1][2][1][RTW89_QATAR][32] = 28,
+ [1][1][2][1][RTW89_UK][32] = 40,
[1][1][2][1][RTW89_FCC][36] = 68,
[1][1][2][1][RTW89_ETSI][36] = 127,
[1][1][2][1][RTW89_MKK][36] = 72,
@@ -47333,6 +47659,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][36] = 68,
[1][1][2][1][RTW89_CN][36] = 127,
[1][1][2][1][RTW89_QATAR][36] = 127,
+ [1][1][2][1][RTW89_UK][36] = 66,
[1][1][2][1][RTW89_FCC][39] = 72,
[1][1][2][1][RTW89_ETSI][39] = 4,
[1][1][2][1][RTW89_MKK][39] = 127,
@@ -47344,6 +47671,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][39] = 72,
[1][1][2][1][RTW89_CN][39] = 62,
[1][1][2][1][RTW89_QATAR][39] = 4,
+ [1][1][2][1][RTW89_UK][39] = 40,
[1][1][2][1][RTW89_FCC][43] = 72,
[1][1][2][1][RTW89_ETSI][43] = 4,
[1][1][2][1][RTW89_MKK][43] = 127,
@@ -47355,6 +47683,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][43] = 72,
[1][1][2][1][RTW89_CN][43] = 72,
[1][1][2][1][RTW89_QATAR][43] = 4,
+ [1][1][2][1][RTW89_UK][43] = 40,
[2][0][2][0][RTW89_FCC][3] = 64,
[2][0][2][0][RTW89_ETSI][3] = 64,
[2][0][2][0][RTW89_MKK][3] = 64,
@@ -47366,6 +47695,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][3] = 62,
[2][0][2][0][RTW89_CN][3] = 64,
[2][0][2][0][RTW89_QATAR][3] = 64,
+ [2][0][2][0][RTW89_UK][3] = 64,
[2][0][2][0][RTW89_FCC][11] = 64,
[2][0][2][0][RTW89_ETSI][11] = 64,
[2][0][2][0][RTW89_MKK][11] = 64,
@@ -47377,6 +47707,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][11] = 64,
[2][0][2][0][RTW89_CN][11] = 64,
[2][0][2][0][RTW89_QATAR][11] = 64,
+ [2][0][2][0][RTW89_UK][11] = 64,
[2][0][2][0][RTW89_FCC][18] = 62,
[2][0][2][0][RTW89_ETSI][18] = 64,
[2][0][2][0][RTW89_MKK][18] = 72,
@@ -47388,6 +47719,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][18] = 62,
[2][0][2][0][RTW89_CN][18] = 127,
[2][0][2][0][RTW89_QATAR][18] = 52,
+ [2][0][2][0][RTW89_UK][18] = 64,
[2][0][2][0][RTW89_FCC][26] = 72,
[2][0][2][0][RTW89_ETSI][26] = 64,
[2][0][2][0][RTW89_MKK][26] = 72,
@@ -47399,6 +47731,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][26] = 72,
[2][0][2][0][RTW89_CN][26] = 127,
[2][0][2][0][RTW89_QATAR][26] = 52,
+ [2][0][2][0][RTW89_UK][26] = 64,
[2][0][2][0][RTW89_FCC][34] = 72,
[2][0][2][0][RTW89_ETSI][34] = 127,
[2][0][2][0][RTW89_MKK][34] = 72,
@@ -47410,6 +47743,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][34] = 72,
[2][0][2][0][RTW89_CN][34] = 127,
[2][0][2][0][RTW89_QATAR][34] = 127,
+ [2][0][2][0][RTW89_UK][34] = 72,
[2][0][2][0][RTW89_FCC][41] = 72,
[2][0][2][0][RTW89_ETSI][41] = 28,
[2][0][2][0][RTW89_MKK][41] = 127,
@@ -47421,6 +47755,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][41] = 72,
[2][0][2][0][RTW89_CN][41] = 68,
[2][0][2][0][RTW89_QATAR][41] = 28,
+ [2][0][2][0][RTW89_UK][41] = 64,
[2][1][2][0][RTW89_FCC][3] = 56,
[2][1][2][0][RTW89_ETSI][3] = 52,
[2][1][2][0][RTW89_MKK][3] = 52,
@@ -47432,6 +47767,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][3] = 50,
[2][1][2][0][RTW89_CN][3] = 52,
[2][1][2][0][RTW89_QATAR][3] = 52,
+ [2][1][2][0][RTW89_UK][3] = 52,
[2][1][2][0][RTW89_FCC][11] = 56,
[2][1][2][0][RTW89_ETSI][11] = 52,
[2][1][2][0][RTW89_MKK][11] = 52,
@@ -47443,6 +47779,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][11] = 56,
[2][1][2][0][RTW89_CN][11] = 52,
[2][1][2][0][RTW89_QATAR][11] = 52,
+ [2][1][2][0][RTW89_UK][11] = 52,
[2][1][2][0][RTW89_FCC][18] = 56,
[2][1][2][0][RTW89_ETSI][18] = 52,
[2][1][2][0][RTW89_MKK][18] = 72,
@@ -47454,6 +47791,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][18] = 56,
[2][1][2][0][RTW89_CN][18] = 127,
[2][1][2][0][RTW89_QATAR][18] = 40,
+ [2][1][2][0][RTW89_UK][18] = 52,
[2][1][2][0][RTW89_FCC][26] = 72,
[2][1][2][0][RTW89_ETSI][26] = 52,
[2][1][2][0][RTW89_MKK][26] = 72,
@@ -47465,6 +47803,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][26] = 72,
[2][1][2][0][RTW89_CN][26] = 127,
[2][1][2][0][RTW89_QATAR][26] = 40,
+ [2][1][2][0][RTW89_UK][26] = 52,
[2][1][2][0][RTW89_FCC][34] = 72,
[2][1][2][0][RTW89_ETSI][34] = 127,
[2][1][2][0][RTW89_MKK][34] = 72,
@@ -47476,6 +47815,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][34] = 72,
[2][1][2][0][RTW89_CN][34] = 127,
[2][1][2][0][RTW89_QATAR][34] = 127,
+ [2][1][2][0][RTW89_UK][34] = 72,
[2][1][2][0][RTW89_FCC][41] = 72,
[2][1][2][0][RTW89_ETSI][41] = 16,
[2][1][2][0][RTW89_MKK][41] = 127,
@@ -47487,6 +47827,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][41] = 72,
[2][1][2][0][RTW89_CN][41] = 68,
[2][1][2][0][RTW89_QATAR][41] = 16,
+ [2][1][2][0][RTW89_UK][41] = 52,
[2][1][2][1][RTW89_FCC][3] = 56,
[2][1][2][1][RTW89_ETSI][3] = 40,
[2][1][2][1][RTW89_MKK][3] = 52,
@@ -47498,6 +47839,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][3] = 50,
[2][1][2][1][RTW89_CN][3] = 40,
[2][1][2][1][RTW89_QATAR][3] = 40,
+ [2][1][2][1][RTW89_UK][3] = 40,
[2][1][2][1][RTW89_FCC][11] = 56,
[2][1][2][1][RTW89_ETSI][11] = 40,
[2][1][2][1][RTW89_MKK][11] = 52,
@@ -47509,6 +47851,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][11] = 56,
[2][1][2][1][RTW89_CN][11] = 40,
[2][1][2][1][RTW89_QATAR][11] = 40,
+ [2][1][2][1][RTW89_UK][11] = 40,
[2][1][2][1][RTW89_FCC][18] = 56,
[2][1][2][1][RTW89_ETSI][18] = 40,
[2][1][2][1][RTW89_MKK][18] = 72,
@@ -47520,6 +47863,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][18] = 56,
[2][1][2][1][RTW89_CN][18] = 127,
[2][1][2][1][RTW89_QATAR][18] = 28,
+ [2][1][2][1][RTW89_UK][18] = 40,
[2][1][2][1][RTW89_FCC][26] = 68,
[2][1][2][1][RTW89_ETSI][26] = 40,
[2][1][2][1][RTW89_MKK][26] = 72,
@@ -47531,6 +47875,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][26] = 68,
[2][1][2][1][RTW89_CN][26] = 127,
[2][1][2][1][RTW89_QATAR][26] = 28,
+ [2][1][2][1][RTW89_UK][26] = 40,
[2][1][2][1][RTW89_FCC][34] = 68,
[2][1][2][1][RTW89_ETSI][34] = 127,
[2][1][2][1][RTW89_MKK][34] = 72,
@@ -47542,6 +47887,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][34] = 68,
[2][1][2][1][RTW89_CN][34] = 127,
[2][1][2][1][RTW89_QATAR][34] = 127,
+ [2][1][2][1][RTW89_UK][34] = 66,
[2][1][2][1][RTW89_FCC][41] = 72,
[2][1][2][1][RTW89_ETSI][41] = 4,
[2][1][2][1][RTW89_MKK][41] = 127,
@@ -47553,6 +47899,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][41] = 72,
[2][1][2][1][RTW89_CN][41] = 64,
[2][1][2][1][RTW89_QATAR][41] = 4,
+ [2][1][2][1][RTW89_UK][41] = 40,
};
const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
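For orientation only: the hunks above add an RTW89_UK row to every per-channel entry of the 5 GHz limit table, alongside the existing FCC/ETSI/MKK/CN/QATAR rows, with 127 apparently serving as a "not allowed / no limit" sentinel. Below is a minimal, self-contained sketch of how a table with this indexing pattern (bandwidth, TX-path count, rate section, beamforming, regulatory domain, channel index) can be declared and consulted. The toy_* names, array sizes, helper and the 127 sentinel are assumptions made for illustration; they are not the rtw89 driver's real definitions or lookup path.

/*
 * Toy model of a per-regulatory-domain TX power limit table with the same
 * index order as the diff above: [bw][ntx][rate section][bf][regd][channel].
 * Illustrative only; not the driver's actual code.
 */
#include <stdio.h>

typedef signed char s8;

enum toy_regd { TOY_FCC, TOY_ETSI, TOY_MKK, TOY_UK, TOY_REGD_NUM };

#define TOY_BW_NUM   1          /* bandwidth index            */
#define TOY_NTX_NUM  1          /* number of TX paths         */
#define TOY_RS_NUM   1          /* rate section               */
#define TOY_BF_NUM   1          /* beamforming off/on         */
#define TOY_CH_NUM   2          /* channel index              */
#define TOY_INVALID  127        /* assumed "not allowed" mark */

static const s8 toy_lmt[TOY_BW_NUM][TOY_NTX_NUM][TOY_RS_NUM][TOY_BF_NUM]
                       [TOY_REGD_NUM][TOY_CH_NUM] = {
        [0][0][0][0][TOY_FCC][0]  = 76,
        [0][0][0][0][TOY_ETSI][0] = 58,
        [0][0][0][0][TOY_MKK][0]  = 76,
        [0][0][0][0][TOY_UK][0]   = 58,
        [0][0][0][0][TOY_FCC][1]  = 76,
        [0][0][0][0][TOY_ETSI][1] = 58,
        [0][0][0][0][TOY_MKK][1]  = TOY_INVALID,
        [0][0][0][0][TOY_UK][1]   = 58,
};

/* Look up one (regulatory domain, channel) cell of the toy table. */
static s8 toy_get_limit(enum toy_regd regd, int ch)
{
        return toy_lmt[0][0][0][0][regd][ch];
}

int main(void)
{
        s8 lim = toy_get_limit(TOY_UK, 0);

        if (lim == TOY_INVALID)
                printf("ch 0: transmission not permitted for this domain\n");
        else
                printf("ch 0: UK limit = %d (raw table units)\n", lim);
        return 0;
}

The point of the sketch is only the shape of the data: adding a new regulatory domain, as this patch does for RTW89_UK, means populating one extra row in every (bandwidth, ntx, rate section, bf, channel) combination so that a lookup for that domain never falls through to an unset cell.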
@@ -47652,6 +47999,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][0] = 70,
[0][0][RTW89_CN][0] = 32,
[0][0][RTW89_QATAR][0] = 32,
+ [0][0][RTW89_UK][0] = 32,
[0][0][RTW89_FCC][1] = 70,
[0][0][RTW89_ETSI][1] = 32,
[0][0][RTW89_MKK][1] = 40,
@@ -47663,6 +48011,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][1] = 70,
[0][0][RTW89_CN][1] = 32,
[0][0][RTW89_QATAR][1] = 32,
+ [0][0][RTW89_UK][1] = 32,
[0][0][RTW89_FCC][2] = 74,
[0][0][RTW89_ETSI][2] = 32,
[0][0][RTW89_MKK][2] = 40,
@@ -47674,6 +48023,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][2] = 74,
[0][0][RTW89_CN][2] = 32,
[0][0][RTW89_QATAR][2] = 32,
+ [0][0][RTW89_UK][2] = 32,
[0][0][RTW89_FCC][3] = 78,
[0][0][RTW89_ETSI][3] = 32,
[0][0][RTW89_MKK][3] = 40,
@@ -47685,6 +48035,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][3] = 78,
[0][0][RTW89_CN][3] = 32,
[0][0][RTW89_QATAR][3] = 32,
+ [0][0][RTW89_UK][3] = 32,
[0][0][RTW89_FCC][4] = 78,
[0][0][RTW89_ETSI][4] = 32,
[0][0][RTW89_MKK][4] = 40,
@@ -47696,6 +48047,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][4] = 78,
[0][0][RTW89_CN][4] = 32,
[0][0][RTW89_QATAR][4] = 32,
+ [0][0][RTW89_UK][4] = 32,
[0][0][RTW89_FCC][5] = 78,
[0][0][RTW89_ETSI][5] = 32,
[0][0][RTW89_MKK][5] = 40,
@@ -47707,6 +48059,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][5] = 78,
[0][0][RTW89_CN][5] = 32,
[0][0][RTW89_QATAR][5] = 32,
+ [0][0][RTW89_UK][5] = 32,
[0][0][RTW89_FCC][6] = 78,
[0][0][RTW89_ETSI][6] = 32,
[0][0][RTW89_MKK][6] = 40,
@@ -47718,6 +48071,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][6] = 78,
[0][0][RTW89_CN][6] = 32,
[0][0][RTW89_QATAR][6] = 32,
+ [0][0][RTW89_UK][6] = 32,
[0][0][RTW89_FCC][7] = 78,
[0][0][RTW89_ETSI][7] = 32,
[0][0][RTW89_MKK][7] = 40,
@@ -47729,6 +48083,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][7] = 78,
[0][0][RTW89_CN][7] = 32,
[0][0][RTW89_QATAR][7] = 32,
+ [0][0][RTW89_UK][7] = 32,
[0][0][RTW89_FCC][8] = 74,
[0][0][RTW89_ETSI][8] = 32,
[0][0][RTW89_MKK][8] = 40,
@@ -47740,6 +48095,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][8] = 74,
[0][0][RTW89_CN][8] = 32,
[0][0][RTW89_QATAR][8] = 32,
+ [0][0][RTW89_UK][8] = 32,
[0][0][RTW89_FCC][9] = 70,
[0][0][RTW89_ETSI][9] = 32,
[0][0][RTW89_MKK][9] = 40,
@@ -47751,6 +48107,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][9] = 70,
[0][0][RTW89_CN][9] = 32,
[0][0][RTW89_QATAR][9] = 32,
+ [0][0][RTW89_UK][9] = 32,
[0][0][RTW89_FCC][10] = 70,
[0][0][RTW89_ETSI][10] = 32,
[0][0][RTW89_MKK][10] = 40,
@@ -47762,6 +48119,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][10] = 70,
[0][0][RTW89_CN][10] = 32,
[0][0][RTW89_QATAR][10] = 32,
+ [0][0][RTW89_UK][10] = 32,
[0][0][RTW89_FCC][11] = 58,
[0][0][RTW89_ETSI][11] = 32,
[0][0][RTW89_MKK][11] = 40,
@@ -47773,6 +48131,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][11] = 58,
[0][0][RTW89_CN][11] = 32,
[0][0][RTW89_QATAR][11] = 32,
+ [0][0][RTW89_UK][11] = 32,
[0][0][RTW89_FCC][12] = 34,
[0][0][RTW89_ETSI][12] = 32,
[0][0][RTW89_MKK][12] = 40,
@@ -47784,6 +48143,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][12] = 34,
[0][0][RTW89_CN][12] = 32,
[0][0][RTW89_QATAR][12] = 32,
+ [0][0][RTW89_UK][12] = 32,
[0][0][RTW89_FCC][13] = 127,
[0][0][RTW89_ETSI][13] = 127,
[0][0][RTW89_MKK][13] = 127,
@@ -47795,6 +48155,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][13] = 127,
[0][0][RTW89_CN][13] = 127,
[0][0][RTW89_QATAR][13] = 127,
+ [0][0][RTW89_UK][13] = 127,
[0][1][RTW89_FCC][0] = 64,
[0][1][RTW89_ETSI][0] = 20,
[0][1][RTW89_MKK][0] = 28,
@@ -47806,6 +48167,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][0] = 64,
[0][1][RTW89_CN][0] = 20,
[0][1][RTW89_QATAR][0] = 20,
+ [0][1][RTW89_UK][0] = 20,
[0][1][RTW89_FCC][1] = 64,
[0][1][RTW89_ETSI][1] = 20,
[0][1][RTW89_MKK][1] = 28,
@@ -47817,6 +48179,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][1] = 64,
[0][1][RTW89_CN][1] = 20,
[0][1][RTW89_QATAR][1] = 20,
+ [0][1][RTW89_UK][1] = 20,
[0][1][RTW89_FCC][2] = 68,
[0][1][RTW89_ETSI][2] = 20,
[0][1][RTW89_MKK][2] = 28,
@@ -47828,6 +48191,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][2] = 68,
[0][1][RTW89_CN][2] = 20,
[0][1][RTW89_QATAR][2] = 20,
+ [0][1][RTW89_UK][2] = 20,
[0][1][RTW89_FCC][3] = 72,
[0][1][RTW89_ETSI][3] = 20,
[0][1][RTW89_MKK][3] = 28,
@@ -47839,6 +48203,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][3] = 72,
[0][1][RTW89_CN][3] = 20,
[0][1][RTW89_QATAR][3] = 20,
+ [0][1][RTW89_UK][3] = 20,
[0][1][RTW89_FCC][4] = 76,
[0][1][RTW89_ETSI][4] = 20,
[0][1][RTW89_MKK][4] = 28,
@@ -47850,6 +48215,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][4] = 76,
[0][1][RTW89_CN][4] = 20,
[0][1][RTW89_QATAR][4] = 20,
+ [0][1][RTW89_UK][4] = 20,
[0][1][RTW89_FCC][5] = 78,
[0][1][RTW89_ETSI][5] = 20,
[0][1][RTW89_MKK][5] = 28,
@@ -47861,6 +48227,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][5] = 78,
[0][1][RTW89_CN][5] = 20,
[0][1][RTW89_QATAR][5] = 20,
+ [0][1][RTW89_UK][5] = 20,
[0][1][RTW89_FCC][6] = 76,
[0][1][RTW89_ETSI][6] = 20,
[0][1][RTW89_MKK][6] = 28,
@@ -47872,6 +48239,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][6] = 76,
[0][1][RTW89_CN][6] = 20,
[0][1][RTW89_QATAR][6] = 20,
+ [0][1][RTW89_UK][6] = 20,
[0][1][RTW89_FCC][7] = 72,
[0][1][RTW89_ETSI][7] = 20,
[0][1][RTW89_MKK][7] = 28,
@@ -47883,6 +48251,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][7] = 72,
[0][1][RTW89_CN][7] = 20,
[0][1][RTW89_QATAR][7] = 20,
+ [0][1][RTW89_UK][7] = 20,
[0][1][RTW89_FCC][8] = 68,
[0][1][RTW89_ETSI][8] = 20,
[0][1][RTW89_MKK][8] = 28,
@@ -47894,6 +48263,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][8] = 68,
[0][1][RTW89_CN][8] = 20,
[0][1][RTW89_QATAR][8] = 20,
+ [0][1][RTW89_UK][8] = 20,
[0][1][RTW89_FCC][9] = 64,
[0][1][RTW89_ETSI][9] = 20,
[0][1][RTW89_MKK][9] = 28,
@@ -47905,6 +48275,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][9] = 64,
[0][1][RTW89_CN][9] = 20,
[0][1][RTW89_QATAR][9] = 20,
+ [0][1][RTW89_UK][9] = 20,
[0][1][RTW89_FCC][10] = 64,
[0][1][RTW89_ETSI][10] = 20,
[0][1][RTW89_MKK][10] = 28,
@@ -47916,6 +48287,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][10] = 64,
[0][1][RTW89_CN][10] = 20,
[0][1][RTW89_QATAR][10] = 20,
+ [0][1][RTW89_UK][10] = 20,
[0][1][RTW89_FCC][11] = 54,
[0][1][RTW89_ETSI][11] = 20,
[0][1][RTW89_MKK][11] = 28,
@@ -47927,6 +48299,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][11] = 54,
[0][1][RTW89_CN][11] = 20,
[0][1][RTW89_QATAR][11] = 20,
+ [0][1][RTW89_UK][11] = 20,
[0][1][RTW89_FCC][12] = 32,
[0][1][RTW89_ETSI][12] = 20,
[0][1][RTW89_MKK][12] = 28,
@@ -47938,6 +48311,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][12] = 32,
[0][1][RTW89_CN][12] = 20,
[0][1][RTW89_QATAR][12] = 20,
+ [0][1][RTW89_UK][12] = 20,
[0][1][RTW89_FCC][13] = 127,
[0][1][RTW89_ETSI][13] = 127,
[0][1][RTW89_MKK][13] = 127,
@@ -47949,6 +48323,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][13] = 127,
[0][1][RTW89_CN][13] = 127,
[0][1][RTW89_QATAR][13] = 127,
+ [0][1][RTW89_UK][13] = 127,
[1][0][RTW89_FCC][0] = 72,
[1][0][RTW89_ETSI][0] = 42,
[1][0][RTW89_MKK][0] = 50,
@@ -47960,6 +48335,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][0] = 72,
[1][0][RTW89_CN][0] = 42,
[1][0][RTW89_QATAR][0] = 42,
+ [1][0][RTW89_UK][0] = 42,
[1][0][RTW89_FCC][1] = 72,
[1][0][RTW89_ETSI][1] = 42,
[1][0][RTW89_MKK][1] = 50,
@@ -47971,6 +48347,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][1] = 72,
[1][0][RTW89_CN][1] = 42,
[1][0][RTW89_QATAR][1] = 42,
+ [1][0][RTW89_UK][1] = 42,
[1][0][RTW89_FCC][2] = 76,
[1][0][RTW89_ETSI][2] = 42,
[1][0][RTW89_MKK][2] = 50,
@@ -47982,6 +48359,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][2] = 76,
[1][0][RTW89_CN][2] = 42,
[1][0][RTW89_QATAR][2] = 42,
+ [1][0][RTW89_UK][2] = 42,
[1][0][RTW89_FCC][3] = 78,
[1][0][RTW89_ETSI][3] = 42,
[1][0][RTW89_MKK][3] = 50,
@@ -47993,6 +48371,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][3] = 78,
[1][0][RTW89_CN][3] = 42,
[1][0][RTW89_QATAR][3] = 42,
+ [1][0][RTW89_UK][3] = 42,
[1][0][RTW89_FCC][4] = 78,
[1][0][RTW89_ETSI][4] = 42,
[1][0][RTW89_MKK][4] = 50,
@@ -48004,6 +48383,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][4] = 78,
[1][0][RTW89_CN][4] = 42,
[1][0][RTW89_QATAR][4] = 42,
+ [1][0][RTW89_UK][4] = 42,
[1][0][RTW89_FCC][5] = 78,
[1][0][RTW89_ETSI][5] = 42,
[1][0][RTW89_MKK][5] = 50,
@@ -48015,6 +48395,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][5] = 78,
[1][0][RTW89_CN][5] = 42,
[1][0][RTW89_QATAR][5] = 42,
+ [1][0][RTW89_UK][5] = 42,
[1][0][RTW89_FCC][6] = 78,
[1][0][RTW89_ETSI][6] = 42,
[1][0][RTW89_MKK][6] = 50,
@@ -48026,6 +48407,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][6] = 78,
[1][0][RTW89_CN][6] = 42,
[1][0][RTW89_QATAR][6] = 42,
+ [1][0][RTW89_UK][6] = 42,
[1][0][RTW89_FCC][7] = 78,
[1][0][RTW89_ETSI][7] = 42,
[1][0][RTW89_MKK][7] = 50,
@@ -48037,6 +48419,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][7] = 78,
[1][0][RTW89_CN][7] = 42,
[1][0][RTW89_QATAR][7] = 42,
+ [1][0][RTW89_UK][7] = 42,
[1][0][RTW89_FCC][8] = 78,
[1][0][RTW89_ETSI][8] = 42,
[1][0][RTW89_MKK][8] = 50,
@@ -48048,6 +48431,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][8] = 78,
[1][0][RTW89_CN][8] = 42,
[1][0][RTW89_QATAR][8] = 42,
+ [1][0][RTW89_UK][8] = 42,
[1][0][RTW89_FCC][9] = 74,
[1][0][RTW89_ETSI][9] = 42,
[1][0][RTW89_MKK][9] = 50,
@@ -48059,6 +48443,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][9] = 74,
[1][0][RTW89_CN][9] = 42,
[1][0][RTW89_QATAR][9] = 42,
+ [1][0][RTW89_UK][9] = 42,
[1][0][RTW89_FCC][10] = 74,
[1][0][RTW89_ETSI][10] = 42,
[1][0][RTW89_MKK][10] = 50,
@@ -48070,6 +48455,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][10] = 74,
[1][0][RTW89_CN][10] = 42,
[1][0][RTW89_QATAR][10] = 42,
+ [1][0][RTW89_UK][10] = 42,
[1][0][RTW89_FCC][11] = 64,
[1][0][RTW89_ETSI][11] = 42,
[1][0][RTW89_MKK][11] = 50,
@@ -48081,6 +48467,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][11] = 64,
[1][0][RTW89_CN][11] = 42,
[1][0][RTW89_QATAR][11] = 42,
+ [1][0][RTW89_UK][11] = 42,
[1][0][RTW89_FCC][12] = 36,
[1][0][RTW89_ETSI][12] = 42,
[1][0][RTW89_MKK][12] = 50,
@@ -48092,6 +48479,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][12] = 36,
[1][0][RTW89_CN][12] = 42,
[1][0][RTW89_QATAR][12] = 42,
+ [1][0][RTW89_UK][12] = 42,
[1][0][RTW89_FCC][13] = 127,
[1][0][RTW89_ETSI][13] = 127,
[1][0][RTW89_MKK][13] = 127,
@@ -48103,6 +48491,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][13] = 127,
[1][0][RTW89_CN][13] = 127,
[1][0][RTW89_QATAR][13] = 127,
+ [1][0][RTW89_UK][13] = 127,
[1][1][RTW89_FCC][0] = 66,
[1][1][RTW89_ETSI][0] = 30,
[1][1][RTW89_MKK][0] = 38,
@@ -48114,6 +48503,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][0] = 66,
[1][1][RTW89_CN][0] = 30,
[1][1][RTW89_QATAR][0] = 30,
+ [1][1][RTW89_UK][0] = 30,
[1][1][RTW89_FCC][1] = 66,
[1][1][RTW89_ETSI][1] = 30,
[1][1][RTW89_MKK][1] = 38,
@@ -48125,6 +48515,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][1] = 66,
[1][1][RTW89_CN][1] = 30,
[1][1][RTW89_QATAR][1] = 30,
+ [1][1][RTW89_UK][1] = 30,
[1][1][RTW89_FCC][2] = 70,
[1][1][RTW89_ETSI][2] = 30,
[1][1][RTW89_MKK][2] = 38,
@@ -48136,6 +48527,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][2] = 70,
[1][1][RTW89_CN][2] = 30,
[1][1][RTW89_QATAR][2] = 30,
+ [1][1][RTW89_UK][2] = 30,
[1][1][RTW89_FCC][3] = 74,
[1][1][RTW89_ETSI][3] = 30,
[1][1][RTW89_MKK][3] = 38,
@@ -48147,6 +48539,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][3] = 74,
[1][1][RTW89_CN][3] = 30,
[1][1][RTW89_QATAR][3] = 30,
+ [1][1][RTW89_UK][3] = 30,
[1][1][RTW89_FCC][4] = 78,
[1][1][RTW89_ETSI][4] = 30,
[1][1][RTW89_MKK][4] = 38,
@@ -48158,6 +48551,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][4] = 78,
[1][1][RTW89_CN][4] = 30,
[1][1][RTW89_QATAR][4] = 30,
+ [1][1][RTW89_UK][4] = 30,
[1][1][RTW89_FCC][5] = 78,
[1][1][RTW89_ETSI][5] = 30,
[1][1][RTW89_MKK][5] = 38,
@@ -48169,6 +48563,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][5] = 78,
[1][1][RTW89_CN][5] = 30,
[1][1][RTW89_QATAR][5] = 30,
+ [1][1][RTW89_UK][5] = 30,
[1][1][RTW89_FCC][6] = 78,
[1][1][RTW89_ETSI][6] = 30,
[1][1][RTW89_MKK][6] = 38,
@@ -48180,6 +48575,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][6] = 78,
[1][1][RTW89_CN][6] = 30,
[1][1][RTW89_QATAR][6] = 30,
+ [1][1][RTW89_UK][6] = 30,
[1][1][RTW89_FCC][7] = 74,
[1][1][RTW89_ETSI][7] = 30,
[1][1][RTW89_MKK][7] = 38,
@@ -48191,6 +48587,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][7] = 74,
[1][1][RTW89_CN][7] = 30,
[1][1][RTW89_QATAR][7] = 30,
+ [1][1][RTW89_UK][7] = 30,
[1][1][RTW89_FCC][8] = 70,
[1][1][RTW89_ETSI][8] = 30,
[1][1][RTW89_MKK][8] = 38,
@@ -48202,6 +48599,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][8] = 70,
[1][1][RTW89_CN][8] = 30,
[1][1][RTW89_QATAR][8] = 30,
+ [1][1][RTW89_UK][8] = 30,
[1][1][RTW89_FCC][9] = 66,
[1][1][RTW89_ETSI][9] = 30,
[1][1][RTW89_MKK][9] = 38,
@@ -48213,6 +48611,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][9] = 66,
[1][1][RTW89_CN][9] = 30,
[1][1][RTW89_QATAR][9] = 30,
+ [1][1][RTW89_UK][9] = 30,
[1][1][RTW89_FCC][10] = 66,
[1][1][RTW89_ETSI][10] = 30,
[1][1][RTW89_MKK][10] = 38,
@@ -48224,6 +48623,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][10] = 66,
[1][1][RTW89_CN][10] = 30,
[1][1][RTW89_QATAR][10] = 30,
+ [1][1][RTW89_UK][10] = 30,
[1][1][RTW89_FCC][11] = 60,
[1][1][RTW89_ETSI][11] = 30,
[1][1][RTW89_MKK][11] = 38,
@@ -48235,6 +48635,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][11] = 60,
[1][1][RTW89_CN][11] = 30,
[1][1][RTW89_QATAR][11] = 30,
+ [1][1][RTW89_UK][11] = 30,
[1][1][RTW89_FCC][12] = 32,
[1][1][RTW89_ETSI][12] = 30,
[1][1][RTW89_MKK][12] = 38,
@@ -48246,6 +48647,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][12] = 32,
[1][1][RTW89_CN][12] = 30,
[1][1][RTW89_QATAR][12] = 30,
+ [1][1][RTW89_UK][12] = 30,
[1][1][RTW89_FCC][13] = 127,
[1][1][RTW89_ETSI][13] = 127,
[1][1][RTW89_MKK][13] = 127,
@@ -48257,6 +48659,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][13] = 127,
[1][1][RTW89_CN][13] = 127,
[1][1][RTW89_QATAR][13] = 127,
+ [1][1][RTW89_UK][13] = 127,
[2][0][RTW89_FCC][0] = 76,
[2][0][RTW89_ETSI][0] = 52,
[2][0][RTW89_MKK][0] = 64,
@@ -48268,6 +48671,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][0] = 76,
[2][0][RTW89_CN][0] = 52,
[2][0][RTW89_QATAR][0] = 52,
+ [2][0][RTW89_UK][0] = 52,
[2][0][RTW89_FCC][1] = 76,
[2][0][RTW89_ETSI][1] = 52,
[2][0][RTW89_MKK][1] = 64,
@@ -48279,6 +48683,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][1] = 76,
[2][0][RTW89_CN][1] = 52,
[2][0][RTW89_QATAR][1] = 52,
+ [2][0][RTW89_UK][1] = 52,
[2][0][RTW89_FCC][2] = 78,
[2][0][RTW89_ETSI][2] = 52,
[2][0][RTW89_MKK][2] = 64,
@@ -48290,6 +48695,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][2] = 78,
[2][0][RTW89_CN][2] = 52,
[2][0][RTW89_QATAR][2] = 52,
+ [2][0][RTW89_UK][2] = 52,
[2][0][RTW89_FCC][3] = 78,
[2][0][RTW89_ETSI][3] = 52,
[2][0][RTW89_MKK][3] = 64,
@@ -48301,6 +48707,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][3] = 78,
[2][0][RTW89_CN][3] = 52,
[2][0][RTW89_QATAR][3] = 52,
+ [2][0][RTW89_UK][3] = 52,
[2][0][RTW89_FCC][4] = 78,
[2][0][RTW89_ETSI][4] = 52,
[2][0][RTW89_MKK][4] = 64,
@@ -48312,6 +48719,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][4] = 78,
[2][0][RTW89_CN][4] = 52,
[2][0][RTW89_QATAR][4] = 52,
+ [2][0][RTW89_UK][4] = 52,
[2][0][RTW89_FCC][5] = 78,
[2][0][RTW89_ETSI][5] = 52,
[2][0][RTW89_MKK][5] = 64,
@@ -48323,6 +48731,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][5] = 78,
[2][0][RTW89_CN][5] = 52,
[2][0][RTW89_QATAR][5] = 52,
+ [2][0][RTW89_UK][5] = 52,
[2][0][RTW89_FCC][6] = 78,
[2][0][RTW89_ETSI][6] = 52,
[2][0][RTW89_MKK][6] = 64,
@@ -48334,6 +48743,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][6] = 78,
[2][0][RTW89_CN][6] = 52,
[2][0][RTW89_QATAR][6] = 52,
+ [2][0][RTW89_UK][6] = 52,
[2][0][RTW89_FCC][7] = 78,
[2][0][RTW89_ETSI][7] = 52,
[2][0][RTW89_MKK][7] = 64,
@@ -48345,6 +48755,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][7] = 78,
[2][0][RTW89_CN][7] = 52,
[2][0][RTW89_QATAR][7] = 52,
+ [2][0][RTW89_UK][7] = 52,
[2][0][RTW89_FCC][8] = 78,
[2][0][RTW89_ETSI][8] = 52,
[2][0][RTW89_MKK][8] = 64,
@@ -48356,6 +48767,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][8] = 78,
[2][0][RTW89_CN][8] = 52,
[2][0][RTW89_QATAR][8] = 52,
+ [2][0][RTW89_UK][8] = 52,
[2][0][RTW89_FCC][9] = 76,
[2][0][RTW89_ETSI][9] = 52,
[2][0][RTW89_MKK][9] = 64,
@@ -48367,6 +48779,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][9] = 76,
[2][0][RTW89_CN][9] = 52,
[2][0][RTW89_QATAR][9] = 52,
+ [2][0][RTW89_UK][9] = 52,
[2][0][RTW89_FCC][10] = 76,
[2][0][RTW89_ETSI][10] = 52,
[2][0][RTW89_MKK][10] = 64,
@@ -48378,6 +48791,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][10] = 76,
[2][0][RTW89_CN][10] = 52,
[2][0][RTW89_QATAR][10] = 52,
+ [2][0][RTW89_UK][10] = 52,
[2][0][RTW89_FCC][11] = 68,
[2][0][RTW89_ETSI][11] = 52,
[2][0][RTW89_MKK][11] = 64,
@@ -48389,6 +48803,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][11] = 68,
[2][0][RTW89_CN][11] = 52,
[2][0][RTW89_QATAR][11] = 52,
+ [2][0][RTW89_UK][11] = 52,
[2][0][RTW89_FCC][12] = 40,
[2][0][RTW89_ETSI][12] = 52,
[2][0][RTW89_MKK][12] = 64,
@@ -48400,6 +48815,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][12] = 40,
[2][0][RTW89_CN][12] = 52,
[2][0][RTW89_QATAR][12] = 52,
+ [2][0][RTW89_UK][12] = 52,
[2][0][RTW89_FCC][13] = 127,
[2][0][RTW89_ETSI][13] = 127,
[2][0][RTW89_MKK][13] = 127,
@@ -48411,6 +48827,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][13] = 127,
[2][0][RTW89_CN][13] = 127,
[2][0][RTW89_QATAR][13] = 127,
+ [2][0][RTW89_UK][13] = 127,
[2][1][RTW89_FCC][0] = 68,
[2][1][RTW89_ETSI][0] = 40,
[2][1][RTW89_MKK][0] = 52,
@@ -48422,6 +48839,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][0] = 68,
[2][1][RTW89_CN][0] = 40,
[2][1][RTW89_QATAR][0] = 40,
+ [2][1][RTW89_UK][0] = 40,
[2][1][RTW89_FCC][1] = 68,
[2][1][RTW89_ETSI][1] = 40,
[2][1][RTW89_MKK][1] = 52,
@@ -48433,6 +48851,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][1] = 68,
[2][1][RTW89_CN][1] = 40,
[2][1][RTW89_QATAR][1] = 40,
+ [2][1][RTW89_UK][1] = 40,
[2][1][RTW89_FCC][2] = 72,
[2][1][RTW89_ETSI][2] = 40,
[2][1][RTW89_MKK][2] = 52,
@@ -48444,6 +48863,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][2] = 72,
[2][1][RTW89_CN][2] = 40,
[2][1][RTW89_QATAR][2] = 40,
+ [2][1][RTW89_UK][2] = 40,
[2][1][RTW89_FCC][3] = 76,
[2][1][RTW89_ETSI][3] = 40,
[2][1][RTW89_MKK][3] = 52,
@@ -48455,6 +48875,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][3] = 76,
[2][1][RTW89_CN][3] = 40,
[2][1][RTW89_QATAR][3] = 40,
+ [2][1][RTW89_UK][3] = 40,
[2][1][RTW89_FCC][4] = 78,
[2][1][RTW89_ETSI][4] = 40,
[2][1][RTW89_MKK][4] = 52,
@@ -48466,6 +48887,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][4] = 78,
[2][1][RTW89_CN][4] = 40,
[2][1][RTW89_QATAR][4] = 40,
+ [2][1][RTW89_UK][4] = 40,
[2][1][RTW89_FCC][5] = 78,
[2][1][RTW89_ETSI][5] = 40,
[2][1][RTW89_MKK][5] = 52,
@@ -48477,6 +48899,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][5] = 78,
[2][1][RTW89_CN][5] = 40,
[2][1][RTW89_QATAR][5] = 40,
+ [2][1][RTW89_UK][5] = 40,
[2][1][RTW89_FCC][6] = 78,
[2][1][RTW89_ETSI][6] = 40,
[2][1][RTW89_MKK][6] = 52,
@@ -48488,6 +48911,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][6] = 78,
[2][1][RTW89_CN][6] = 40,
[2][1][RTW89_QATAR][6] = 40,
+ [2][1][RTW89_UK][6] = 40,
[2][1][RTW89_FCC][7] = 78,
[2][1][RTW89_ETSI][7] = 40,
[2][1][RTW89_MKK][7] = 52,
@@ -48499,6 +48923,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][7] = 78,
[2][1][RTW89_CN][7] = 40,
[2][1][RTW89_QATAR][7] = 40,
+ [2][1][RTW89_UK][7] = 40,
[2][1][RTW89_FCC][8] = 74,
[2][1][RTW89_ETSI][8] = 40,
[2][1][RTW89_MKK][8] = 52,
@@ -48510,6 +48935,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][8] = 74,
[2][1][RTW89_CN][8] = 40,
[2][1][RTW89_QATAR][8] = 40,
+ [2][1][RTW89_UK][8] = 40,
[2][1][RTW89_FCC][9] = 70,
[2][1][RTW89_ETSI][9] = 40,
[2][1][RTW89_MKK][9] = 52,
@@ -48521,6 +48947,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][9] = 70,
[2][1][RTW89_CN][9] = 40,
[2][1][RTW89_QATAR][9] = 40,
+ [2][1][RTW89_UK][9] = 40,
[2][1][RTW89_FCC][10] = 70,
[2][1][RTW89_ETSI][10] = 40,
[2][1][RTW89_MKK][10] = 52,
@@ -48532,6 +48959,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][10] = 70,
[2][1][RTW89_CN][10] = 40,
[2][1][RTW89_QATAR][10] = 40,
+ [2][1][RTW89_UK][10] = 40,
[2][1][RTW89_FCC][11] = 48,
[2][1][RTW89_ETSI][11] = 40,
[2][1][RTW89_MKK][11] = 52,
@@ -48543,6 +48971,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][11] = 48,
[2][1][RTW89_CN][11] = 40,
[2][1][RTW89_QATAR][11] = 40,
+ [2][1][RTW89_UK][11] = 40,
[2][1][RTW89_FCC][12] = 26,
[2][1][RTW89_ETSI][12] = 40,
[2][1][RTW89_MKK][12] = 52,
@@ -48554,6 +48983,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][12] = 26,
[2][1][RTW89_CN][12] = 40,
[2][1][RTW89_QATAR][12] = 40,
+ [2][1][RTW89_UK][12] = 40,
[2][1][RTW89_FCC][13] = 127,
[2][1][RTW89_ETSI][13] = 127,
[2][1][RTW89_MKK][13] = 127,
@@ -48565,6 +48995,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][13] = 127,
[2][1][RTW89_CN][13] = 127,
[2][1][RTW89_QATAR][13] = 127,
+ [2][1][RTW89_UK][13] = 127,
};
const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
@@ -48730,6 +49161,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][0] = 48,
[0][0][RTW89_CN][0] = 24,
[0][0][RTW89_QATAR][0] = 24,
+ [0][0][RTW89_UK][0] = 24,
[0][0][RTW89_FCC][2] = 48,
[0][0][RTW89_ETSI][2] = 24,
[0][0][RTW89_MKK][2] = 26,
@@ -48741,6 +49173,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][2] = 48,
[0][0][RTW89_CN][2] = 24,
[0][0][RTW89_QATAR][2] = 24,
+ [0][0][RTW89_UK][2] = 24,
[0][0][RTW89_FCC][4] = 48,
[0][0][RTW89_ETSI][4] = 24,
[0][0][RTW89_MKK][4] = 26,
@@ -48752,6 +49185,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][4] = 48,
[0][0][RTW89_CN][4] = 24,
[0][0][RTW89_QATAR][4] = 24,
+ [0][0][RTW89_UK][4] = 24,
[0][0][RTW89_FCC][6] = 48,
[0][0][RTW89_ETSI][6] = 24,
[0][0][RTW89_MKK][6] = 26,
@@ -48763,6 +49197,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][6] = 48,
[0][0][RTW89_CN][6] = 24,
[0][0][RTW89_QATAR][6] = 24,
+ [0][0][RTW89_UK][6] = 24,
[0][0][RTW89_FCC][8] = 48,
[0][0][RTW89_ETSI][8] = 24,
[0][0][RTW89_MKK][8] = 26,
@@ -48774,6 +49209,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][8] = 48,
[0][0][RTW89_CN][8] = 24,
[0][0][RTW89_QATAR][8] = 24,
+ [0][0][RTW89_UK][8] = 24,
[0][0][RTW89_FCC][10] = 48,
[0][0][RTW89_ETSI][10] = 24,
[0][0][RTW89_MKK][10] = 26,
@@ -48785,6 +49221,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][10] = 48,
[0][0][RTW89_CN][10] = 24,
[0][0][RTW89_QATAR][10] = 24,
+ [0][0][RTW89_UK][10] = 24,
[0][0][RTW89_FCC][12] = 48,
[0][0][RTW89_ETSI][12] = 24,
[0][0][RTW89_MKK][12] = 26,
@@ -48796,6 +49233,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][12] = 48,
[0][0][RTW89_CN][12] = 24,
[0][0][RTW89_QATAR][12] = 24,
+ [0][0][RTW89_UK][12] = 24,
[0][0][RTW89_FCC][14] = 48,
[0][0][RTW89_ETSI][14] = 24,
[0][0][RTW89_MKK][14] = 26,
@@ -48807,6 +49245,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][14] = 48,
[0][0][RTW89_CN][14] = 24,
[0][0][RTW89_QATAR][14] = 24,
+ [0][0][RTW89_UK][14] = 24,
[0][0][RTW89_FCC][15] = 48,
[0][0][RTW89_ETSI][15] = 24,
[0][0][RTW89_MKK][15] = 44,
@@ -48818,6 +49257,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][15] = 48,
[0][0][RTW89_CN][15] = 127,
[0][0][RTW89_QATAR][15] = 24,
+ [0][0][RTW89_UK][15] = 24,
[0][0][RTW89_FCC][17] = 48,
[0][0][RTW89_ETSI][17] = 24,
[0][0][RTW89_MKK][17] = 44,
@@ -48829,6 +49269,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][17] = 48,
[0][0][RTW89_CN][17] = 127,
[0][0][RTW89_QATAR][17] = 24,
+ [0][0][RTW89_UK][17] = 24,
[0][0][RTW89_FCC][19] = 48,
[0][0][RTW89_ETSI][19] = 24,
[0][0][RTW89_MKK][19] = 44,
@@ -48840,6 +49281,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][19] = 48,
[0][0][RTW89_CN][19] = 127,
[0][0][RTW89_QATAR][19] = 24,
+ [0][0][RTW89_UK][19] = 24,
[0][0][RTW89_FCC][21] = 48,
[0][0][RTW89_ETSI][21] = 24,
[0][0][RTW89_MKK][21] = 44,
@@ -48851,6 +49293,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][21] = 48,
[0][0][RTW89_CN][21] = 127,
[0][0][RTW89_QATAR][21] = 24,
+ [0][0][RTW89_UK][21] = 24,
[0][0][RTW89_FCC][23] = 48,
[0][0][RTW89_ETSI][23] = 24,
[0][0][RTW89_MKK][23] = 44,
@@ -48862,6 +49305,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][23] = 48,
[0][0][RTW89_CN][23] = 127,
[0][0][RTW89_QATAR][23] = 24,
+ [0][0][RTW89_UK][23] = 24,
[0][0][RTW89_FCC][25] = 48,
[0][0][RTW89_ETSI][25] = 24,
[0][0][RTW89_MKK][25] = 44,
@@ -48873,6 +49317,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][25] = 48,
[0][0][RTW89_CN][25] = 127,
[0][0][RTW89_QATAR][25] = 24,
+ [0][0][RTW89_UK][25] = 24,
[0][0][RTW89_FCC][27] = 48,
[0][0][RTW89_ETSI][27] = 24,
[0][0][RTW89_MKK][27] = 44,
@@ -48884,6 +49329,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][27] = 48,
[0][0][RTW89_CN][27] = 127,
[0][0][RTW89_QATAR][27] = 24,
+ [0][0][RTW89_UK][27] = 24,
[0][0][RTW89_FCC][29] = 48,
[0][0][RTW89_ETSI][29] = 24,
[0][0][RTW89_MKK][29] = 44,
@@ -48895,6 +49341,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][29] = 48,
[0][0][RTW89_CN][29] = 127,
[0][0][RTW89_QATAR][29] = 24,
+ [0][0][RTW89_UK][29] = 24,
[0][0][RTW89_FCC][31] = 48,
[0][0][RTW89_ETSI][31] = 24,
[0][0][RTW89_MKK][31] = 44,
@@ -48906,6 +49353,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][31] = 48,
[0][0][RTW89_CN][31] = 127,
[0][0][RTW89_QATAR][31] = 24,
+ [0][0][RTW89_UK][31] = 24,
[0][0][RTW89_FCC][33] = 48,
[0][0][RTW89_ETSI][33] = 24,
[0][0][RTW89_MKK][33] = 44,
@@ -48917,6 +49365,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][33] = 48,
[0][0][RTW89_CN][33] = 127,
[0][0][RTW89_QATAR][33] = 24,
+ [0][0][RTW89_UK][33] = 24,
[0][0][RTW89_FCC][35] = 48,
[0][0][RTW89_ETSI][35] = 24,
[0][0][RTW89_MKK][35] = 44,
@@ -48928,6 +49377,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][35] = 48,
[0][0][RTW89_CN][35] = 127,
[0][0][RTW89_QATAR][35] = 24,
+ [0][0][RTW89_UK][35] = 24,
[0][0][RTW89_FCC][37] = 48,
[0][0][RTW89_ETSI][37] = 127,
[0][0][RTW89_MKK][37] = 44,
@@ -48939,6 +49389,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][37] = 48,
[0][0][RTW89_CN][37] = 127,
[0][0][RTW89_QATAR][37] = 127,
+ [0][0][RTW89_UK][37] = 58,
[0][0][RTW89_FCC][38] = 76,
[0][0][RTW89_ETSI][38] = 28,
[0][0][RTW89_MKK][38] = 127,
@@ -48950,6 +49401,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][38] = 76,
[0][0][RTW89_CN][38] = 62,
[0][0][RTW89_QATAR][38] = 28,
+ [0][0][RTW89_UK][38] = 28,
[0][0][RTW89_FCC][40] = 76,
[0][0][RTW89_ETSI][40] = 28,
[0][0][RTW89_MKK][40] = 127,
@@ -48961,6 +49413,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][40] = 76,
[0][0][RTW89_CN][40] = 62,
[0][0][RTW89_QATAR][40] = 28,
+ [0][0][RTW89_UK][40] = 28,
[0][0][RTW89_FCC][42] = 76,
[0][0][RTW89_ETSI][42] = 28,
[0][0][RTW89_MKK][42] = 127,
@@ -48972,6 +49425,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][42] = 76,
[0][0][RTW89_CN][42] = 62,
[0][0][RTW89_QATAR][42] = 28,
+ [0][0][RTW89_UK][42] = 28,
[0][0][RTW89_FCC][44] = 76,
[0][0][RTW89_ETSI][44] = 28,
[0][0][RTW89_MKK][44] = 127,
@@ -48983,6 +49437,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][44] = 76,
[0][0][RTW89_CN][44] = 62,
[0][0][RTW89_QATAR][44] = 28,
+ [0][0][RTW89_UK][44] = 28,
[0][0][RTW89_FCC][46] = 76,
[0][0][RTW89_ETSI][46] = 28,
[0][0][RTW89_MKK][46] = 127,
@@ -48994,6 +49449,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][46] = 76,
[0][0][RTW89_CN][46] = 62,
[0][0][RTW89_QATAR][46] = 28,
+ [0][0][RTW89_UK][46] = 28,
[0][1][RTW89_FCC][0] = 36,
[0][1][RTW89_ETSI][0] = 12,
[0][1][RTW89_MKK][0] = 14,
@@ -49005,6 +49461,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][0] = 36,
[0][1][RTW89_CN][0] = 12,
[0][1][RTW89_QATAR][0] = 12,
+ [0][1][RTW89_UK][0] = 12,
[0][1][RTW89_FCC][2] = 36,
[0][1][RTW89_ETSI][2] = 12,
[0][1][RTW89_MKK][2] = 14,
@@ -49016,6 +49473,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][2] = 36,
[0][1][RTW89_CN][2] = 12,
[0][1][RTW89_QATAR][2] = 12,
+ [0][1][RTW89_UK][2] = 12,
[0][1][RTW89_FCC][4] = 36,
[0][1][RTW89_ETSI][4] = 12,
[0][1][RTW89_MKK][4] = 14,
@@ -49027,6 +49485,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][4] = 36,
[0][1][RTW89_CN][4] = 12,
[0][1][RTW89_QATAR][4] = 12,
+ [0][1][RTW89_UK][4] = 12,
[0][1][RTW89_FCC][6] = 36,
[0][1][RTW89_ETSI][6] = 12,
[0][1][RTW89_MKK][6] = 14,
@@ -49038,6 +49497,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][6] = 36,
[0][1][RTW89_CN][6] = 12,
[0][1][RTW89_QATAR][6] = 12,
+ [0][1][RTW89_UK][6] = 12,
[0][1][RTW89_FCC][8] = 36,
[0][1][RTW89_ETSI][8] = 12,
[0][1][RTW89_MKK][8] = 14,
@@ -49049,6 +49509,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][8] = 36,
[0][1][RTW89_CN][8] = 12,
[0][1][RTW89_QATAR][8] = 12,
+ [0][1][RTW89_UK][8] = 12,
[0][1][RTW89_FCC][10] = 36,
[0][1][RTW89_ETSI][10] = 12,
[0][1][RTW89_MKK][10] = 14,
@@ -49060,6 +49521,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][10] = 36,
[0][1][RTW89_CN][10] = 12,
[0][1][RTW89_QATAR][10] = 12,
+ [0][1][RTW89_UK][10] = 12,
[0][1][RTW89_FCC][12] = 36,
[0][1][RTW89_ETSI][12] = 12,
[0][1][RTW89_MKK][12] = 14,
@@ -49071,6 +49533,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][12] = 36,
[0][1][RTW89_CN][12] = 12,
[0][1][RTW89_QATAR][12] = 12,
+ [0][1][RTW89_UK][12] = 12,
[0][1][RTW89_FCC][14] = 36,
[0][1][RTW89_ETSI][14] = 12,
[0][1][RTW89_MKK][14] = 14,
@@ -49082,6 +49545,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][14] = 36,
[0][1][RTW89_CN][14] = 12,
[0][1][RTW89_QATAR][14] = 12,
+ [0][1][RTW89_UK][14] = 12,
[0][1][RTW89_FCC][15] = 36,
[0][1][RTW89_ETSI][15] = 12,
[0][1][RTW89_MKK][15] = 32,
@@ -49093,6 +49557,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][15] = 36,
[0][1][RTW89_CN][15] = 127,
[0][1][RTW89_QATAR][15] = 12,
+ [0][1][RTW89_UK][15] = 12,
[0][1][RTW89_FCC][17] = 36,
[0][1][RTW89_ETSI][17] = 12,
[0][1][RTW89_MKK][17] = 32,
@@ -49104,6 +49569,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][17] = 36,
[0][1][RTW89_CN][17] = 127,
[0][1][RTW89_QATAR][17] = 12,
+ [0][1][RTW89_UK][17] = 12,
[0][1][RTW89_FCC][19] = 36,
[0][1][RTW89_ETSI][19] = 12,
[0][1][RTW89_MKK][19] = 32,
@@ -49115,6 +49581,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][19] = 36,
[0][1][RTW89_CN][19] = 127,
[0][1][RTW89_QATAR][19] = 12,
+ [0][1][RTW89_UK][19] = 12,
[0][1][RTW89_FCC][21] = 36,
[0][1][RTW89_ETSI][21] = 12,
[0][1][RTW89_MKK][21] = 32,
@@ -49126,6 +49593,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][21] = 36,
[0][1][RTW89_CN][21] = 127,
[0][1][RTW89_QATAR][21] = 12,
+ [0][1][RTW89_UK][21] = 12,
[0][1][RTW89_FCC][23] = 36,
[0][1][RTW89_ETSI][23] = 12,
[0][1][RTW89_MKK][23] = 32,
@@ -49137,6 +49605,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][23] = 36,
[0][1][RTW89_CN][23] = 127,
[0][1][RTW89_QATAR][23] = 12,
+ [0][1][RTW89_UK][23] = 12,
[0][1][RTW89_FCC][25] = 36,
[0][1][RTW89_ETSI][25] = 12,
[0][1][RTW89_MKK][25] = 32,
@@ -49148,6 +49617,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][25] = 36,
[0][1][RTW89_CN][25] = 127,
[0][1][RTW89_QATAR][25] = 12,
+ [0][1][RTW89_UK][25] = 12,
[0][1][RTW89_FCC][27] = 36,
[0][1][RTW89_ETSI][27] = 12,
[0][1][RTW89_MKK][27] = 32,
@@ -49159,6 +49629,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][27] = 36,
[0][1][RTW89_CN][27] = 127,
[0][1][RTW89_QATAR][27] = 12,
+ [0][1][RTW89_UK][27] = 12,
[0][1][RTW89_FCC][29] = 36,
[0][1][RTW89_ETSI][29] = 12,
[0][1][RTW89_MKK][29] = 32,
@@ -49170,6 +49641,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][29] = 36,
[0][1][RTW89_CN][29] = 127,
[0][1][RTW89_QATAR][29] = 12,
+ [0][1][RTW89_UK][29] = 12,
[0][1][RTW89_FCC][31] = 36,
[0][1][RTW89_ETSI][31] = 12,
[0][1][RTW89_MKK][31] = 32,
@@ -49181,6 +49653,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][31] = 36,
[0][1][RTW89_CN][31] = 127,
[0][1][RTW89_QATAR][31] = 12,
+ [0][1][RTW89_UK][31] = 12,
[0][1][RTW89_FCC][33] = 36,
[0][1][RTW89_ETSI][33] = 12,
[0][1][RTW89_MKK][33] = 32,
@@ -49192,6 +49665,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][33] = 36,
[0][1][RTW89_CN][33] = 127,
[0][1][RTW89_QATAR][33] = 12,
+ [0][1][RTW89_UK][33] = 12,
[0][1][RTW89_FCC][35] = 36,
[0][1][RTW89_ETSI][35] = 12,
[0][1][RTW89_MKK][35] = 32,
@@ -49203,6 +49677,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][35] = 36,
[0][1][RTW89_CN][35] = 127,
[0][1][RTW89_QATAR][35] = 12,
+ [0][1][RTW89_UK][35] = 12,
[0][1][RTW89_FCC][37] = 36,
[0][1][RTW89_ETSI][37] = 127,
[0][1][RTW89_MKK][37] = 32,
@@ -49214,6 +49689,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][37] = 36,
[0][1][RTW89_CN][37] = 127,
[0][1][RTW89_QATAR][37] = 127,
+ [0][1][RTW89_UK][37] = 46,
[0][1][RTW89_FCC][38] = 72,
[0][1][RTW89_ETSI][38] = 16,
[0][1][RTW89_MKK][38] = 127,
@@ -49225,6 +49701,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][38] = 72,
[0][1][RTW89_CN][38] = 50,
[0][1][RTW89_QATAR][38] = 16,
+ [0][1][RTW89_UK][38] = 16,
[0][1][RTW89_FCC][40] = 76,
[0][1][RTW89_ETSI][40] = 16,
[0][1][RTW89_MKK][40] = 127,
@@ -49236,6 +49713,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][40] = 76,
[0][1][RTW89_CN][40] = 50,
[0][1][RTW89_QATAR][40] = 16,
+ [0][1][RTW89_UK][40] = 16,
[0][1][RTW89_FCC][42] = 76,
[0][1][RTW89_ETSI][42] = 16,
[0][1][RTW89_MKK][42] = 127,
@@ -49247,6 +49725,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][42] = 76,
[0][1][RTW89_CN][42] = 50,
[0][1][RTW89_QATAR][42] = 16,
+ [0][1][RTW89_UK][42] = 16,
[0][1][RTW89_FCC][44] = 76,
[0][1][RTW89_ETSI][44] = 16,
[0][1][RTW89_MKK][44] = 127,
@@ -49258,6 +49737,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][44] = 76,
[0][1][RTW89_CN][44] = 50,
[0][1][RTW89_QATAR][44] = 16,
+ [0][1][RTW89_UK][44] = 16,
[0][1][RTW89_FCC][46] = 76,
[0][1][RTW89_ETSI][46] = 16,
[0][1][RTW89_MKK][46] = 127,
@@ -49269,6 +49749,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][46] = 76,
[0][1][RTW89_CN][46] = 50,
[0][1][RTW89_QATAR][46] = 16,
+ [0][1][RTW89_UK][46] = 16,
[1][0][RTW89_FCC][0] = 62,
[1][0][RTW89_ETSI][0] = 36,
[1][0][RTW89_MKK][0] = 36,
@@ -49280,6 +49761,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][0] = 62,
[1][0][RTW89_CN][0] = 36,
[1][0][RTW89_QATAR][0] = 36,
+ [1][0][RTW89_UK][0] = 36,
[1][0][RTW89_FCC][2] = 62,
[1][0][RTW89_ETSI][2] = 36,
[1][0][RTW89_MKK][2] = 36,
@@ -49291,6 +49773,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][2] = 62,
[1][0][RTW89_CN][2] = 36,
[1][0][RTW89_QATAR][2] = 36,
+ [1][0][RTW89_UK][2] = 36,
[1][0][RTW89_FCC][4] = 62,
[1][0][RTW89_ETSI][4] = 36,
[1][0][RTW89_MKK][4] = 36,
@@ -49302,6 +49785,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][4] = 62,
[1][0][RTW89_CN][4] = 36,
[1][0][RTW89_QATAR][4] = 36,
+ [1][0][RTW89_UK][4] = 36,
[1][0][RTW89_FCC][6] = 62,
[1][0][RTW89_ETSI][6] = 36,
[1][0][RTW89_MKK][6] = 36,
@@ -49313,6 +49797,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][6] = 62,
[1][0][RTW89_CN][6] = 36,
[1][0][RTW89_QATAR][6] = 36,
+ [1][0][RTW89_UK][6] = 36,
[1][0][RTW89_FCC][8] = 62,
[1][0][RTW89_ETSI][8] = 36,
[1][0][RTW89_MKK][8] = 36,
@@ -49324,6 +49809,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][8] = 62,
[1][0][RTW89_CN][8] = 36,
[1][0][RTW89_QATAR][8] = 36,
+ [1][0][RTW89_UK][8] = 36,
[1][0][RTW89_FCC][10] = 62,
[1][0][RTW89_ETSI][10] = 36,
[1][0][RTW89_MKK][10] = 36,
@@ -49335,6 +49821,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][10] = 62,
[1][0][RTW89_CN][10] = 36,
[1][0][RTW89_QATAR][10] = 36,
+ [1][0][RTW89_UK][10] = 36,
[1][0][RTW89_FCC][12] = 62,
[1][0][RTW89_ETSI][12] = 36,
[1][0][RTW89_MKK][12] = 36,
@@ -49346,6 +49833,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][12] = 62,
[1][0][RTW89_CN][12] = 36,
[1][0][RTW89_QATAR][12] = 36,
+ [1][0][RTW89_UK][12] = 36,
[1][0][RTW89_FCC][14] = 62,
[1][0][RTW89_ETSI][14] = 36,
[1][0][RTW89_MKK][14] = 36,
@@ -49357,6 +49845,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][14] = 62,
[1][0][RTW89_CN][14] = 36,
[1][0][RTW89_QATAR][14] = 36,
+ [1][0][RTW89_UK][14] = 36,
[1][0][RTW89_FCC][15] = 62,
[1][0][RTW89_ETSI][15] = 36,
[1][0][RTW89_MKK][15] = 58,
@@ -49368,6 +49857,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][15] = 62,
[1][0][RTW89_CN][15] = 127,
[1][0][RTW89_QATAR][15] = 36,
+ [1][0][RTW89_UK][15] = 36,
[1][0][RTW89_FCC][17] = 62,
[1][0][RTW89_ETSI][17] = 36,
[1][0][RTW89_MKK][17] = 58,
@@ -49379,6 +49869,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][17] = 62,
[1][0][RTW89_CN][17] = 127,
[1][0][RTW89_QATAR][17] = 36,
+ [1][0][RTW89_UK][17] = 36,
[1][0][RTW89_FCC][19] = 62,
[1][0][RTW89_ETSI][19] = 36,
[1][0][RTW89_MKK][19] = 58,
@@ -49390,6 +49881,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][19] = 62,
[1][0][RTW89_CN][19] = 127,
[1][0][RTW89_QATAR][19] = 36,
+ [1][0][RTW89_UK][19] = 36,
[1][0][RTW89_FCC][21] = 62,
[1][0][RTW89_ETSI][21] = 36,
[1][0][RTW89_MKK][21] = 58,
@@ -49401,6 +49893,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][21] = 62,
[1][0][RTW89_CN][21] = 127,
[1][0][RTW89_QATAR][21] = 36,
+ [1][0][RTW89_UK][21] = 36,
[1][0][RTW89_FCC][23] = 62,
[1][0][RTW89_ETSI][23] = 36,
[1][0][RTW89_MKK][23] = 58,
@@ -49412,6 +49905,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][23] = 62,
[1][0][RTW89_CN][23] = 127,
[1][0][RTW89_QATAR][23] = 36,
+ [1][0][RTW89_UK][23] = 36,
[1][0][RTW89_FCC][25] = 62,
[1][0][RTW89_ETSI][25] = 36,
[1][0][RTW89_MKK][25] = 58,
@@ -49423,6 +49917,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][25] = 62,
[1][0][RTW89_CN][25] = 127,
[1][0][RTW89_QATAR][25] = 36,
+ [1][0][RTW89_UK][25] = 36,
[1][0][RTW89_FCC][27] = 62,
[1][0][RTW89_ETSI][27] = 36,
[1][0][RTW89_MKK][27] = 58,
@@ -49434,6 +49929,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][27] = 62,
[1][0][RTW89_CN][27] = 127,
[1][0][RTW89_QATAR][27] = 36,
+ [1][0][RTW89_UK][27] = 36,
[1][0][RTW89_FCC][29] = 62,
[1][0][RTW89_ETSI][29] = 36,
[1][0][RTW89_MKK][29] = 58,
@@ -49445,6 +49941,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][29] = 62,
[1][0][RTW89_CN][29] = 127,
[1][0][RTW89_QATAR][29] = 36,
+ [1][0][RTW89_UK][29] = 36,
[1][0][RTW89_FCC][31] = 62,
[1][0][RTW89_ETSI][31] = 36,
[1][0][RTW89_MKK][31] = 58,
@@ -49456,6 +49953,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][31] = 62,
[1][0][RTW89_CN][31] = 127,
[1][0][RTW89_QATAR][31] = 36,
+ [1][0][RTW89_UK][31] = 36,
[1][0][RTW89_FCC][33] = 62,
[1][0][RTW89_ETSI][33] = 36,
[1][0][RTW89_MKK][33] = 58,
@@ -49467,6 +49965,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][33] = 62,
[1][0][RTW89_CN][33] = 127,
[1][0][RTW89_QATAR][33] = 36,
+ [1][0][RTW89_UK][33] = 36,
[1][0][RTW89_FCC][35] = 62,
[1][0][RTW89_ETSI][35] = 36,
[1][0][RTW89_MKK][35] = 58,
@@ -49478,6 +49977,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][35] = 62,
[1][0][RTW89_CN][35] = 127,
[1][0][RTW89_QATAR][35] = 36,
+ [1][0][RTW89_UK][35] = 36,
[1][0][RTW89_FCC][37] = 62,
[1][0][RTW89_ETSI][37] = 127,
[1][0][RTW89_MKK][37] = 58,
@@ -49489,6 +49989,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][37] = 62,
[1][0][RTW89_CN][37] = 127,
[1][0][RTW89_QATAR][37] = 127,
+ [1][0][RTW89_UK][37] = 64,
[1][0][RTW89_FCC][38] = 76,
[1][0][RTW89_ETSI][38] = 28,
[1][0][RTW89_MKK][38] = 127,
@@ -49500,6 +50001,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][38] = 76,
[1][0][RTW89_CN][38] = 74,
[1][0][RTW89_QATAR][38] = 28,
+ [1][0][RTW89_UK][38] = 34,
[1][0][RTW89_FCC][40] = 76,
[1][0][RTW89_ETSI][40] = 28,
[1][0][RTW89_MKK][40] = 127,
@@ -49511,6 +50013,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][40] = 76,
[1][0][RTW89_CN][40] = 74,
[1][0][RTW89_QATAR][40] = 28,
+ [1][0][RTW89_UK][40] = 34,
[1][0][RTW89_FCC][42] = 76,
[1][0][RTW89_ETSI][42] = 28,
[1][0][RTW89_MKK][42] = 127,
@@ -49522,6 +50025,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][42] = 76,
[1][0][RTW89_CN][42] = 74,
[1][0][RTW89_QATAR][42] = 28,
+ [1][0][RTW89_UK][42] = 34,
[1][0][RTW89_FCC][44] = 76,
[1][0][RTW89_ETSI][44] = 28,
[1][0][RTW89_MKK][44] = 127,
@@ -49533,6 +50037,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][44] = 76,
[1][0][RTW89_CN][44] = 74,
[1][0][RTW89_QATAR][44] = 28,
+ [1][0][RTW89_UK][44] = 34,
[1][0][RTW89_FCC][46] = 76,
[1][0][RTW89_ETSI][46] = 28,
[1][0][RTW89_MKK][46] = 127,
@@ -49544,6 +50049,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][46] = 76,
[1][0][RTW89_CN][46] = 74,
[1][0][RTW89_QATAR][46] = 28,
+ [1][0][RTW89_UK][46] = 34,
[1][1][RTW89_FCC][0] = 46,
[1][1][RTW89_ETSI][0] = 22,
[1][1][RTW89_MKK][0] = 24,
@@ -49555,6 +50061,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][0] = 46,
[1][1][RTW89_CN][0] = 22,
[1][1][RTW89_QATAR][0] = 22,
+ [1][1][RTW89_UK][0] = 22,
[1][1][RTW89_FCC][2] = 46,
[1][1][RTW89_ETSI][2] = 22,
[1][1][RTW89_MKK][2] = 24,
@@ -49566,6 +50073,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][2] = 46,
[1][1][RTW89_CN][2] = 22,
[1][1][RTW89_QATAR][2] = 22,
+ [1][1][RTW89_UK][2] = 22,
[1][1][RTW89_FCC][4] = 46,
[1][1][RTW89_ETSI][4] = 22,
[1][1][RTW89_MKK][4] = 24,
@@ -49577,6 +50085,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][4] = 46,
[1][1][RTW89_CN][4] = 22,
[1][1][RTW89_QATAR][4] = 22,
+ [1][1][RTW89_UK][4] = 22,
[1][1][RTW89_FCC][6] = 46,
[1][1][RTW89_ETSI][6] = 22,
[1][1][RTW89_MKK][6] = 24,
@@ -49588,6 +50097,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][6] = 46,
[1][1][RTW89_CN][6] = 22,
[1][1][RTW89_QATAR][6] = 22,
+ [1][1][RTW89_UK][6] = 22,
[1][1][RTW89_FCC][8] = 46,
[1][1][RTW89_ETSI][8] = 22,
[1][1][RTW89_MKK][8] = 24,
@@ -49599,6 +50109,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][8] = 46,
[1][1][RTW89_CN][8] = 22,
[1][1][RTW89_QATAR][8] = 22,
+ [1][1][RTW89_UK][8] = 22,
[1][1][RTW89_FCC][10] = 46,
[1][1][RTW89_ETSI][10] = 22,
[1][1][RTW89_MKK][10] = 24,
@@ -49610,6 +50121,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][10] = 46,
[1][1][RTW89_CN][10] = 22,
[1][1][RTW89_QATAR][10] = 22,
+ [1][1][RTW89_UK][10] = 22,
[1][1][RTW89_FCC][12] = 46,
[1][1][RTW89_ETSI][12] = 22,
[1][1][RTW89_MKK][12] = 24,
@@ -49621,6 +50133,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][12] = 46,
[1][1][RTW89_CN][12] = 22,
[1][1][RTW89_QATAR][12] = 22,
+ [1][1][RTW89_UK][12] = 22,
[1][1][RTW89_FCC][14] = 46,
[1][1][RTW89_ETSI][14] = 22,
[1][1][RTW89_MKK][14] = 24,
@@ -49632,6 +50145,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][14] = 46,
[1][1][RTW89_CN][14] = 22,
[1][1][RTW89_QATAR][14] = 22,
+ [1][1][RTW89_UK][14] = 22,
[1][1][RTW89_FCC][15] = 46,
[1][1][RTW89_ETSI][15] = 22,
[1][1][RTW89_MKK][15] = 46,
@@ -49643,6 +50157,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][15] = 46,
[1][1][RTW89_CN][15] = 127,
[1][1][RTW89_QATAR][15] = 22,
+ [1][1][RTW89_UK][15] = 22,
[1][1][RTW89_FCC][17] = 46,
[1][1][RTW89_ETSI][17] = 22,
[1][1][RTW89_MKK][17] = 46,
@@ -49654,6 +50169,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][17] = 46,
[1][1][RTW89_CN][17] = 127,
[1][1][RTW89_QATAR][17] = 22,
+ [1][1][RTW89_UK][17] = 22,
[1][1][RTW89_FCC][19] = 46,
[1][1][RTW89_ETSI][19] = 22,
[1][1][RTW89_MKK][19] = 46,
@@ -49665,6 +50181,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][19] = 46,
[1][1][RTW89_CN][19] = 127,
[1][1][RTW89_QATAR][19] = 22,
+ [1][1][RTW89_UK][19] = 22,
[1][1][RTW89_FCC][21] = 46,
[1][1][RTW89_ETSI][21] = 22,
[1][1][RTW89_MKK][21] = 46,
@@ -49676,6 +50193,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][21] = 46,
[1][1][RTW89_CN][21] = 127,
[1][1][RTW89_QATAR][21] = 22,
+ [1][1][RTW89_UK][21] = 22,
[1][1][RTW89_FCC][23] = 46,
[1][1][RTW89_ETSI][23] = 22,
[1][1][RTW89_MKK][23] = 46,
@@ -49687,6 +50205,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][23] = 46,
[1][1][RTW89_CN][23] = 127,
[1][1][RTW89_QATAR][23] = 22,
+ [1][1][RTW89_UK][23] = 22,
[1][1][RTW89_FCC][25] = 46,
[1][1][RTW89_ETSI][25] = 22,
[1][1][RTW89_MKK][25] = 46,
@@ -49698,6 +50217,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][25] = 46,
[1][1][RTW89_CN][25] = 127,
[1][1][RTW89_QATAR][25] = 22,
+ [1][1][RTW89_UK][25] = 22,
[1][1][RTW89_FCC][27] = 46,
[1][1][RTW89_ETSI][27] = 22,
[1][1][RTW89_MKK][27] = 46,
@@ -49709,6 +50229,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][27] = 46,
[1][1][RTW89_CN][27] = 127,
[1][1][RTW89_QATAR][27] = 22,
+ [1][1][RTW89_UK][27] = 22,
[1][1][RTW89_FCC][29] = 46,
[1][1][RTW89_ETSI][29] = 22,
[1][1][RTW89_MKK][29] = 46,
@@ -49720,6 +50241,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][29] = 46,
[1][1][RTW89_CN][29] = 127,
[1][1][RTW89_QATAR][29] = 22,
+ [1][1][RTW89_UK][29] = 22,
[1][1][RTW89_FCC][31] = 46,
[1][1][RTW89_ETSI][31] = 22,
[1][1][RTW89_MKK][31] = 46,
@@ -49731,6 +50253,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][31] = 46,
[1][1][RTW89_CN][31] = 127,
[1][1][RTW89_QATAR][31] = 22,
+ [1][1][RTW89_UK][31] = 22,
[1][1][RTW89_FCC][33] = 46,
[1][1][RTW89_ETSI][33] = 22,
[1][1][RTW89_MKK][33] = 46,
@@ -49742,6 +50265,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][33] = 46,
[1][1][RTW89_CN][33] = 127,
[1][1][RTW89_QATAR][33] = 22,
+ [1][1][RTW89_UK][33] = 22,
[1][1][RTW89_FCC][35] = 46,
[1][1][RTW89_ETSI][35] = 22,
[1][1][RTW89_MKK][35] = 46,
@@ -49753,6 +50277,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][35] = 46,
[1][1][RTW89_CN][35] = 127,
[1][1][RTW89_QATAR][35] = 22,
+ [1][1][RTW89_UK][35] = 22,
[1][1][RTW89_FCC][37] = 46,
[1][1][RTW89_ETSI][37] = 127,
[1][1][RTW89_MKK][37] = 46,
@@ -49764,6 +50289,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][37] = 46,
[1][1][RTW89_CN][37] = 127,
[1][1][RTW89_QATAR][37] = 127,
+ [1][1][RTW89_UK][37] = 52,
[1][1][RTW89_FCC][38] = 74,
[1][1][RTW89_ETSI][38] = 16,
[1][1][RTW89_MKK][38] = 127,
@@ -49775,6 +50301,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][38] = 74,
[1][1][RTW89_CN][38] = 62,
[1][1][RTW89_QATAR][38] = 16,
+ [1][1][RTW89_UK][38] = 22,
[1][1][RTW89_FCC][40] = 76,
[1][1][RTW89_ETSI][40] = 16,
[1][1][RTW89_MKK][40] = 127,
@@ -49786,6 +50313,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][40] = 76,
[1][1][RTW89_CN][40] = 62,
[1][1][RTW89_QATAR][40] = 16,
+ [1][1][RTW89_UK][40] = 22,
[1][1][RTW89_FCC][42] = 76,
[1][1][RTW89_ETSI][42] = 16,
[1][1][RTW89_MKK][42] = 127,
@@ -49797,6 +50325,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][42] = 76,
[1][1][RTW89_CN][42] = 62,
[1][1][RTW89_QATAR][42] = 16,
+ [1][1][RTW89_UK][42] = 22,
[1][1][RTW89_FCC][44] = 76,
[1][1][RTW89_ETSI][44] = 16,
[1][1][RTW89_MKK][44] = 127,
@@ -49808,6 +50337,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][44] = 76,
[1][1][RTW89_CN][44] = 62,
[1][1][RTW89_QATAR][44] = 16,
+ [1][1][RTW89_UK][44] = 22,
[1][1][RTW89_FCC][46] = 76,
[1][1][RTW89_ETSI][46] = 16,
[1][1][RTW89_MKK][46] = 127,
@@ -49819,6 +50349,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][46] = 76,
[1][1][RTW89_CN][46] = 62,
[1][1][RTW89_QATAR][46] = 16,
+ [1][1][RTW89_UK][46] = 22,
[2][0][RTW89_FCC][0] = 74,
[2][0][RTW89_ETSI][0] = 46,
[2][0][RTW89_MKK][0] = 50,
@@ -49830,6 +50361,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][0] = 62,
[2][0][RTW89_CN][0] = 46,
[2][0][RTW89_QATAR][0] = 46,
+ [2][0][RTW89_UK][0] = 46,
[2][0][RTW89_FCC][2] = 74,
[2][0][RTW89_ETSI][2] = 46,
[2][0][RTW89_MKK][2] = 50,
@@ -49841,6 +50373,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][2] = 62,
[2][0][RTW89_CN][2] = 46,
[2][0][RTW89_QATAR][2] = 46,
+ [2][0][RTW89_UK][2] = 46,
[2][0][RTW89_FCC][4] = 74,
[2][0][RTW89_ETSI][4] = 46,
[2][0][RTW89_MKK][4] = 50,
@@ -49852,6 +50385,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][4] = 62,
[2][0][RTW89_CN][4] = 46,
[2][0][RTW89_QATAR][4] = 46,
+ [2][0][RTW89_UK][4] = 46,
[2][0][RTW89_FCC][6] = 74,
[2][0][RTW89_ETSI][6] = 46,
[2][0][RTW89_MKK][6] = 50,
@@ -49863,6 +50397,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][6] = 62,
[2][0][RTW89_CN][6] = 46,
[2][0][RTW89_QATAR][6] = 46,
+ [2][0][RTW89_UK][6] = 46,
[2][0][RTW89_FCC][8] = 74,
[2][0][RTW89_ETSI][8] = 46,
[2][0][RTW89_MKK][8] = 50,
@@ -49874,6 +50409,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][8] = 74,
[2][0][RTW89_CN][8] = 46,
[2][0][RTW89_QATAR][8] = 46,
+ [2][0][RTW89_UK][8] = 46,
[2][0][RTW89_FCC][10] = 74,
[2][0][RTW89_ETSI][10] = 46,
[2][0][RTW89_MKK][10] = 50,
@@ -49885,6 +50421,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][10] = 74,
[2][0][RTW89_CN][10] = 46,
[2][0][RTW89_QATAR][10] = 46,
+ [2][0][RTW89_UK][10] = 46,
[2][0][RTW89_FCC][12] = 74,
[2][0][RTW89_ETSI][12] = 46,
[2][0][RTW89_MKK][12] = 50,
@@ -49896,6 +50433,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][12] = 74,
[2][0][RTW89_CN][12] = 46,
[2][0][RTW89_QATAR][12] = 46,
+ [2][0][RTW89_UK][12] = 46,
[2][0][RTW89_FCC][14] = 74,
[2][0][RTW89_ETSI][14] = 46,
[2][0][RTW89_MKK][14] = 50,
@@ -49907,6 +50445,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][14] = 74,
[2][0][RTW89_CN][14] = 46,
[2][0][RTW89_QATAR][14] = 46,
+ [2][0][RTW89_UK][14] = 46,
[2][0][RTW89_FCC][15] = 74,
[2][0][RTW89_ETSI][15] = 46,
[2][0][RTW89_MKK][15] = 70,
@@ -49918,6 +50457,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][15] = 74,
[2][0][RTW89_CN][15] = 127,
[2][0][RTW89_QATAR][15] = 46,
+ [2][0][RTW89_UK][15] = 46,
[2][0][RTW89_FCC][17] = 74,
[2][0][RTW89_ETSI][17] = 46,
[2][0][RTW89_MKK][17] = 70,
@@ -49929,6 +50469,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][17] = 74,
[2][0][RTW89_CN][17] = 127,
[2][0][RTW89_QATAR][17] = 46,
+ [2][0][RTW89_UK][17] = 46,
[2][0][RTW89_FCC][19] = 74,
[2][0][RTW89_ETSI][19] = 46,
[2][0][RTW89_MKK][19] = 70,
@@ -49940,6 +50481,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][19] = 74,
[2][0][RTW89_CN][19] = 127,
[2][0][RTW89_QATAR][19] = 46,
+ [2][0][RTW89_UK][19] = 46,
[2][0][RTW89_FCC][21] = 74,
[2][0][RTW89_ETSI][21] = 46,
[2][0][RTW89_MKK][21] = 70,
@@ -49951,6 +50493,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][21] = 74,
[2][0][RTW89_CN][21] = 127,
[2][0][RTW89_QATAR][21] = 46,
+ [2][0][RTW89_UK][21] = 46,
[2][0][RTW89_FCC][23] = 74,
[2][0][RTW89_ETSI][23] = 46,
[2][0][RTW89_MKK][23] = 70,
@@ -49962,6 +50505,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][23] = 74,
[2][0][RTW89_CN][23] = 127,
[2][0][RTW89_QATAR][23] = 46,
+ [2][0][RTW89_UK][23] = 46,
[2][0][RTW89_FCC][25] = 74,
[2][0][RTW89_ETSI][25] = 46,
[2][0][RTW89_MKK][25] = 70,
@@ -49973,6 +50517,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][25] = 74,
[2][0][RTW89_CN][25] = 127,
[2][0][RTW89_QATAR][25] = 46,
+ [2][0][RTW89_UK][25] = 46,
[2][0][RTW89_FCC][27] = 74,
[2][0][RTW89_ETSI][27] = 46,
[2][0][RTW89_MKK][27] = 70,
@@ -49984,6 +50529,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][27] = 74,
[2][0][RTW89_CN][27] = 127,
[2][0][RTW89_QATAR][27] = 46,
+ [2][0][RTW89_UK][27] = 46,
[2][0][RTW89_FCC][29] = 74,
[2][0][RTW89_ETSI][29] = 46,
[2][0][RTW89_MKK][29] = 70,
@@ -49995,6 +50541,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][29] = 74,
[2][0][RTW89_CN][29] = 127,
[2][0][RTW89_QATAR][29] = 46,
+ [2][0][RTW89_UK][29] = 46,
[2][0][RTW89_FCC][31] = 74,
[2][0][RTW89_ETSI][31] = 46,
[2][0][RTW89_MKK][31] = 70,
@@ -50006,6 +50553,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][31] = 74,
[2][0][RTW89_CN][31] = 127,
[2][0][RTW89_QATAR][31] = 46,
+ [2][0][RTW89_UK][31] = 46,
[2][0][RTW89_FCC][33] = 74,
[2][0][RTW89_ETSI][33] = 46,
[2][0][RTW89_MKK][33] = 70,
@@ -50017,6 +50565,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][33] = 74,
[2][0][RTW89_CN][33] = 127,
[2][0][RTW89_QATAR][33] = 46,
+ [2][0][RTW89_UK][33] = 46,
[2][0][RTW89_FCC][35] = 74,
[2][0][RTW89_ETSI][35] = 46,
[2][0][RTW89_MKK][35] = 70,
@@ -50028,6 +50577,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][35] = 74,
[2][0][RTW89_CN][35] = 127,
[2][0][RTW89_QATAR][35] = 46,
+ [2][0][RTW89_UK][35] = 46,
[2][0][RTW89_FCC][37] = 74,
[2][0][RTW89_ETSI][37] = 127,
[2][0][RTW89_MKK][37] = 70,
@@ -50039,6 +50589,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][37] = 74,
[2][0][RTW89_CN][37] = 127,
[2][0][RTW89_QATAR][37] = 127,
+ [2][0][RTW89_UK][37] = 74,
[2][0][RTW89_FCC][38] = 76,
[2][0][RTW89_ETSI][38] = 28,
[2][0][RTW89_MKK][38] = 127,
@@ -50050,6 +50601,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][38] = 76,
[2][0][RTW89_CN][38] = 76,
[2][0][RTW89_QATAR][38] = 28,
+ [2][0][RTW89_UK][38] = 44,
[2][0][RTW89_FCC][40] = 76,
[2][0][RTW89_ETSI][40] = 28,
[2][0][RTW89_MKK][40] = 127,
@@ -50061,6 +50613,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][40] = 76,
[2][0][RTW89_CN][40] = 76,
[2][0][RTW89_QATAR][40] = 28,
+ [2][0][RTW89_UK][40] = 44,
[2][0][RTW89_FCC][42] = 76,
[2][0][RTW89_ETSI][42] = 28,
[2][0][RTW89_MKK][42] = 127,
@@ -50072,6 +50625,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][42] = 76,
[2][0][RTW89_CN][42] = 76,
[2][0][RTW89_QATAR][42] = 28,
+ [2][0][RTW89_UK][42] = 44,
[2][0][RTW89_FCC][44] = 76,
[2][0][RTW89_ETSI][44] = 28,
[2][0][RTW89_MKK][44] = 127,
@@ -50083,6 +50637,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][44] = 76,
[2][0][RTW89_CN][44] = 76,
[2][0][RTW89_QATAR][44] = 28,
+ [2][0][RTW89_UK][44] = 44,
[2][0][RTW89_FCC][46] = 76,
[2][0][RTW89_ETSI][46] = 28,
[2][0][RTW89_MKK][46] = 127,
@@ -50094,6 +50649,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][46] = 76,
[2][0][RTW89_CN][46] = 76,
[2][0][RTW89_QATAR][46] = 28,
+ [2][0][RTW89_UK][46] = 44,
[2][1][RTW89_FCC][0] = 58,
[2][1][RTW89_ETSI][0] = 32,
[2][1][RTW89_MKK][0] = 38,
@@ -50105,6 +50661,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][0] = 50,
[2][1][RTW89_CN][0] = 32,
[2][1][RTW89_QATAR][0] = 32,
+ [2][1][RTW89_UK][0] = 32,
[2][1][RTW89_FCC][2] = 58,
[2][1][RTW89_ETSI][2] = 32,
[2][1][RTW89_MKK][2] = 38,
@@ -50116,6 +50673,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][2] = 50,
[2][1][RTW89_CN][2] = 32,
[2][1][RTW89_QATAR][2] = 32,
+ [2][1][RTW89_UK][2] = 32,
[2][1][RTW89_FCC][4] = 58,
[2][1][RTW89_ETSI][4] = 32,
[2][1][RTW89_MKK][4] = 38,
@@ -50127,6 +50685,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][4] = 50,
[2][1][RTW89_CN][4] = 32,
[2][1][RTW89_QATAR][4] = 32,
+ [2][1][RTW89_UK][4] = 32,
[2][1][RTW89_FCC][6] = 58,
[2][1][RTW89_ETSI][6] = 32,
[2][1][RTW89_MKK][6] = 38,
@@ -50138,6 +50697,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][6] = 50,
[2][1][RTW89_CN][6] = 32,
[2][1][RTW89_QATAR][6] = 32,
+ [2][1][RTW89_UK][6] = 32,
[2][1][RTW89_FCC][8] = 58,
[2][1][RTW89_ETSI][8] = 32,
[2][1][RTW89_MKK][8] = 38,
@@ -50149,6 +50709,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][8] = 58,
[2][1][RTW89_CN][8] = 32,
[2][1][RTW89_QATAR][8] = 32,
+ [2][1][RTW89_UK][8] = 32,
[2][1][RTW89_FCC][10] = 58,
[2][1][RTW89_ETSI][10] = 32,
[2][1][RTW89_MKK][10] = 38,
@@ -50160,6 +50721,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][10] = 58,
[2][1][RTW89_CN][10] = 32,
[2][1][RTW89_QATAR][10] = 32,
+ [2][1][RTW89_UK][10] = 32,
[2][1][RTW89_FCC][12] = 58,
[2][1][RTW89_ETSI][12] = 32,
[2][1][RTW89_MKK][12] = 38,
@@ -50171,6 +50733,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][12] = 58,
[2][1][RTW89_CN][12] = 32,
[2][1][RTW89_QATAR][12] = 32,
+ [2][1][RTW89_UK][12] = 32,
[2][1][RTW89_FCC][14] = 58,
[2][1][RTW89_ETSI][14] = 32,
[2][1][RTW89_MKK][14] = 38,
@@ -50182,6 +50745,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][14] = 58,
[2][1][RTW89_CN][14] = 32,
[2][1][RTW89_QATAR][14] = 32,
+ [2][1][RTW89_UK][14] = 32,
[2][1][RTW89_FCC][15] = 58,
[2][1][RTW89_ETSI][15] = 32,
[2][1][RTW89_MKK][15] = 58,
@@ -50193,6 +50757,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][15] = 58,
[2][1][RTW89_CN][15] = 127,
[2][1][RTW89_QATAR][15] = 32,
+ [2][1][RTW89_UK][15] = 32,
[2][1][RTW89_FCC][17] = 58,
[2][1][RTW89_ETSI][17] = 32,
[2][1][RTW89_MKK][17] = 58,
@@ -50204,6 +50769,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][17] = 58,
[2][1][RTW89_CN][17] = 127,
[2][1][RTW89_QATAR][17] = 32,
+ [2][1][RTW89_UK][17] = 32,
[2][1][RTW89_FCC][19] = 58,
[2][1][RTW89_ETSI][19] = 32,
[2][1][RTW89_MKK][19] = 58,
@@ -50215,6 +50781,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][19] = 58,
[2][1][RTW89_CN][19] = 127,
[2][1][RTW89_QATAR][19] = 32,
+ [2][1][RTW89_UK][19] = 32,
[2][1][RTW89_FCC][21] = 58,
[2][1][RTW89_ETSI][21] = 32,
[2][1][RTW89_MKK][21] = 58,
@@ -50226,6 +50793,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][21] = 58,
[2][1][RTW89_CN][21] = 127,
[2][1][RTW89_QATAR][21] = 32,
+ [2][1][RTW89_UK][21] = 32,
[2][1][RTW89_FCC][23] = 58,
[2][1][RTW89_ETSI][23] = 32,
[2][1][RTW89_MKK][23] = 58,
@@ -50237,6 +50805,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][23] = 58,
[2][1][RTW89_CN][23] = 127,
[2][1][RTW89_QATAR][23] = 32,
+ [2][1][RTW89_UK][23] = 32,
[2][1][RTW89_FCC][25] = 58,
[2][1][RTW89_ETSI][25] = 32,
[2][1][RTW89_MKK][25] = 58,
@@ -50248,6 +50817,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][25] = 58,
[2][1][RTW89_CN][25] = 127,
[2][1][RTW89_QATAR][25] = 32,
+ [2][1][RTW89_UK][25] = 32,
[2][1][RTW89_FCC][27] = 58,
[2][1][RTW89_ETSI][27] = 32,
[2][1][RTW89_MKK][27] = 58,
@@ -50259,6 +50829,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][27] = 58,
[2][1][RTW89_CN][27] = 127,
[2][1][RTW89_QATAR][27] = 32,
+ [2][1][RTW89_UK][27] = 32,
[2][1][RTW89_FCC][29] = 58,
[2][1][RTW89_ETSI][29] = 32,
[2][1][RTW89_MKK][29] = 58,
@@ -50270,6 +50841,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][29] = 58,
[2][1][RTW89_CN][29] = 127,
[2][1][RTW89_QATAR][29] = 32,
+ [2][1][RTW89_UK][29] = 32,
[2][1][RTW89_FCC][31] = 58,
[2][1][RTW89_ETSI][31] = 32,
[2][1][RTW89_MKK][31] = 58,
@@ -50281,6 +50853,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][31] = 58,
[2][1][RTW89_CN][31] = 127,
[2][1][RTW89_QATAR][31] = 32,
+ [2][1][RTW89_UK][31] = 32,
[2][1][RTW89_FCC][33] = 58,
[2][1][RTW89_ETSI][33] = 32,
[2][1][RTW89_MKK][33] = 58,
@@ -50292,6 +50865,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][33] = 58,
[2][1][RTW89_CN][33] = 127,
[2][1][RTW89_QATAR][33] = 32,
+ [2][1][RTW89_UK][33] = 32,
[2][1][RTW89_FCC][35] = 58,
[2][1][RTW89_ETSI][35] = 32,
[2][1][RTW89_MKK][35] = 58,
@@ -50303,6 +50877,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][35] = 58,
[2][1][RTW89_CN][35] = 127,
[2][1][RTW89_QATAR][35] = 32,
+ [2][1][RTW89_UK][35] = 32,
[2][1][RTW89_FCC][37] = 58,
[2][1][RTW89_ETSI][37] = 127,
[2][1][RTW89_MKK][37] = 58,
@@ -50314,6 +50889,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][37] = 58,
[2][1][RTW89_CN][37] = 127,
[2][1][RTW89_QATAR][37] = 127,
+ [2][1][RTW89_UK][37] = 62,
[2][1][RTW89_FCC][38] = 76,
[2][1][RTW89_ETSI][38] = 16,
[2][1][RTW89_MKK][38] = 127,
@@ -50325,6 +50901,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][38] = 76,
[2][1][RTW89_CN][38] = 64,
[2][1][RTW89_QATAR][38] = 16,
+ [2][1][RTW89_UK][38] = 32,
[2][1][RTW89_FCC][40] = 76,
[2][1][RTW89_ETSI][40] = 16,
[2][1][RTW89_MKK][40] = 127,
@@ -50336,6 +50913,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][40] = 76,
[2][1][RTW89_CN][40] = 64,
[2][1][RTW89_QATAR][40] = 16,
+ [2][1][RTW89_UK][40] = 32,
[2][1][RTW89_FCC][42] = 76,
[2][1][RTW89_ETSI][42] = 16,
[2][1][RTW89_MKK][42] = 127,
@@ -50347,6 +50925,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][42] = 76,
[2][1][RTW89_CN][42] = 64,
[2][1][RTW89_QATAR][42] = 16,
+ [2][1][RTW89_UK][42] = 32,
[2][1][RTW89_FCC][44] = 76,
[2][1][RTW89_ETSI][44] = 16,
[2][1][RTW89_MKK][44] = 127,
@@ -50358,6 +50937,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][44] = 76,
[2][1][RTW89_CN][44] = 64,
[2][1][RTW89_QATAR][44] = 16,
+ [2][1][RTW89_UK][44] = 32,
[2][1][RTW89_FCC][46] = 76,
[2][1][RTW89_ETSI][46] = 16,
[2][1][RTW89_MKK][46] = 127,
@@ -50369,6 +50949,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][46] = 76,
[2][1][RTW89_CN][46] = 64,
[2][1][RTW89_QATAR][46] = 16,
+ [2][1][RTW89_UK][46] = 32,
};
#define DECLARE_DIG_TABLE(name) \
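Editor's note: the hunks above extend the per-chip RU power-limit tables (rtw89_8852a_txpwr_lmt_ru_2g / _5g) with an RTW89_UK column next to the existing regulatory domains. Each entry is an s8 limit keyed by RU/bandwidth bucket, TX-path count, regulatory domain and channel index, and the new UK entries carry 127 wherever the neighbouring domains also carry 127. The following is a minimal, self-contained sketch of how such a table is laid out and consulted; the table shape, bounds, enum values and the query_limit helper are illustrative assumptions, not the driver's actual API.

/* Illustrative only: names, bounds and values below are assumptions. */
#include <stdio.h>

typedef signed char s8;

enum regd { REGD_FCC, REGD_ETSI, REGD_UK, REGD_NUM };

#define RU_NUM	3	/* RU/bandwidth buckets, stand-in for RTW89_RU_NUM */
#define NTX_NUM	2	/* 1 or 2 TX paths, stand-in for RTW89_NTX_NUM */
#define CH_NUM	14	/* 2 GHz channel slots */

/* Tiny stand-in for rtw89_8852a_txpwr_lmt_ru_2g: sparse designated
 * initializers, one s8 limit per (ru, ntx, regd, channel) tuple. */
static const s8 lmt_ru_2g[RU_NUM][NTX_NUM][REGD_NUM][CH_NUM] = {
	[2][1][REGD_UK][12] = 40,	/* channel 13 */
	[2][1][REGD_UK][13] = 127,	/* channel 14, same sentinel as the other domains */
};

static s8 query_limit(int ru, int ntx, enum regd regd, int ch_idx)
{
	return lmt_ru_2g[ru][ntx][regd][ch_idx];
}

int main(void)
{
	printf("UK, 2nd TX path, channel 13: %d\n", query_limit(2, 1, REGD_UK, 12));
	return 0;
}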
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 48459aba441d..190c4aefb02e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -6,10 +6,50 @@
#include <linux/pci.h>
#include "pci.h"
+#include "reg.h"
#include "rtw8852a.h"
static const struct rtw89_pci_info rtw8852a_pci_info = {
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_2048B,
+ .rx_burst = MAC_AX_RX_BURST_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+
+ .init_cfg_reg = R_AX_PCIE_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN,
+ .rxhci_en_bit = B_AX_RXHCI_EN,
+ .rxbd_mode_bit = B_AX_RXBD_MODE,
+ .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
+ .txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2,
+ .dma_stop1_reg = R_AX_PCIE_DMA_STOP1,
+ .dma_stop2_reg = R_AX_PCIE_DMA_STOP2,
+ .dma_busy1_reg = R_AX_PCIE_DMA_BUSY1,
+ .dma_busy2_reg = R_AX_PCIE_DMA_BUSY2,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
+ .rpwm_addr = R_AX_PCIE_HRPWM,
+ .cpwm_addr = R_AX_CPWM,
+ .bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+
+ .ltr_set = rtw89_pci_ltr_set,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .config_intr_mask = rtw89_pci_config_intr_mask,
+ .enable_intr = rtw89_pci_enable_intr,
+ .disable_intr = rtw89_pci_disable_intr,
+ .recognize_intrs = rtw89_pci_recognize_intrs,
};
static const struct rtw89_driver_info rtw89_8852ae_info = {
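Editor's note: the rtw8852ae.c hunk above fills in the previously near-empty rtw89_pci_info so that chip-specific PCIe details (truncation/tag modes, burst sizes, register offsets, interrupt/LTR callbacks) live in a per-chip descriptor that the common PCI code consumes. The sketch below shows that descriptor pattern in isolation; the struct layout, register values and function names are illustrative assumptions, not the driver's real definitions.

/* Illustrative pattern only; names and values are assumptions. */
#include <stdint.h>
#include <stdio.h>

struct chip_pci_info {
	uint32_t init_cfg_reg;		/* chip-specific register offset */
	uint32_t dma_stop1_reg;
	void (*enable_intr)(void *dev);	/* chip-specific interrupt hook */
};

static void demo_enable_intr(void *dev)
{
	(void)dev;
	printf("enable interrupts for this chip\n");
}

static const struct chip_pci_info demo_info = {
	.init_cfg_reg = 0x3F0,		/* hypothetical offsets */
	.dma_stop1_reg = 0x1010,
	.enable_intr = demo_enable_intr,
};

/* Common PCI code only sees the descriptor, never per-chip #ifdefs. */
static void common_pci_start(const struct chip_pci_info *info, void *dev)
{
	printf("init cfg at 0x%x, dma stop at 0x%x\n",
	       (unsigned)info->init_cfg_reg, (unsigned)info->dma_stop1_reg);
	info->enable_intr(dev);
}

int main(void)
{
	common_pci_start(&demo_info, NULL);
	return 0;
}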
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 58920e91765e..4fb3de71d032 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -2,20 +2,57 @@
/* Copyright(c) 2019-2022 Realtek Corporation
*/
+#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852c.h"
+#include "rtw8852c_rfk.h"
+#include "rtw8852c_table.h"
+#include "util.h"
+
+static const struct rtw89_hfc_ch_cfg rtw8852c_hfc_chcfg_pcie[] = {
+ {13, 1614, grp_0}, /* ACH 0 */
+ {13, 1614, grp_0}, /* ACH 1 */
+ {13, 1614, grp_0}, /* ACH 2 */
+ {13, 1614, grp_0}, /* ACH 3 */
+ {13, 1614, grp_1}, /* ACH 4 */
+ {13, 1614, grp_1}, /* ACH 5 */
+ {13, 1614, grp_1}, /* ACH 6 */
+ {13, 1614, grp_1}, /* ACH 7 */
+ {13, 1614, grp_0}, /* B0MGQ */
+ {13, 1614, grp_0}, /* B0HIQ */
+ {13, 1614, grp_1}, /* B1MGQ */
+ {13, 1614, grp_1}, /* B1HIQ */
+ {40, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8852c_hfc_pubcfg_pcie = {
+ 1614, /* Group 0 */
+ 1614, /* Group 1 */
+ 3228, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8852c_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8852c_hfc_chcfg_pcie, &rtw8852c_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = {
- [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size19, &rtw89_ple_size19,
- &rtw89_wde_qt18, &rtw89_wde_qt18, &rtw89_ple_qt46,
- &rtw89_ple_qt47},
- [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size18,
- &rtw89_ple_size18, &rtw89_wde_qt17, &rtw89_wde_qt17,
- &rtw89_ple_qt44, &rtw89_ple_qt45},
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size19,
+ &rtw89_mac_size.ple_size19, &rtw89_mac_size.wde_qt18,
+ &rtw89_mac_size.wde_qt18, &rtw89_mac_size.ple_qt46,
+ &rtw89_mac_size.ple_qt47},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size18,
+ &rtw89_mac_size.ple_size18, &rtw89_mac_size.wde_qt17,
+ &rtw89_mac_size.wde_qt17, &rtw89_mac_size.ple_qt44,
+ &rtw89_mac_size.ple_qt45},
[RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
NULL},
};
@@ -49,6 +86,53 @@ static const struct rtw89_reg_def rtw8852c_dcfo_comp = {
R_DCFO_COMP_S0_V1, B_DCFO_COMP_S0_V1_MSK
};
+static const struct rtw89_imr_info rtw8852c_imr_info = {
+ .wdrls_imr_set = B_AX_WDRLS_IMR_SET_V1,
+ .wsec_imr_reg = R_AX_SEC_ERROR_FLAG_IMR,
+ .wsec_imr_set = B_AX_TX_HANG_IMR | B_AX_RX_HANG_IMR,
+ .mpdu_tx_imr_set = B_AX_MPDU_TX_IMR_SET_V1,
+ .mpdu_rx_imr_set = B_AX_MPDU_RX_IMR_SET_V1,
+ .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET,
+ .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_B0_ERRFLAG_IMR,
+ .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR_V1,
+ .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET_V1,
+ .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_B1_ERRFLAG_IMR,
+ .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR_V1,
+ .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET_V1,
+ .wde_imr_clr = B_AX_WDE_IMR_CLR_V1,
+ .wde_imr_set = B_AX_WDE_IMR_SET_V1,
+ .ple_imr_clr = B_AX_PLE_IMR_CLR_V1,
+ .ple_imr_set = B_AX_PLE_IMR_SET_V1,
+ .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR_V1,
+ .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET_V1,
+ .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR_V1,
+ .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET_V1,
+ .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR_V1,
+ .other_disp_imr_set = B_AX_OTHER_DISP_IMR_SET_V1,
+ .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR,
+ .bbrpt_err_imr_set = R_AX_BBRPT_CHINFO_IMR_SET_V1,
+ .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR,
+ .ptcl_imr_clr = B_AX_PTCL_IMR_CLR_V1,
+ .ptcl_imr_set = B_AX_PTCL_IMR_SET_V1,
+ .cdma_imr_0_reg = R_AX_RX_ERR_FLAG_IMR,
+ .cdma_imr_0_clr = B_AX_RX_ERR_IMR_CLR_V1,
+ .cdma_imr_0_set = B_AX_RX_ERR_IMR_SET_V1,
+ .cdma_imr_1_reg = R_AX_TX_ERR_FLAG_IMR,
+ .cdma_imr_1_clr = B_AX_TX_ERR_IMR_CLR_V1,
+ .cdma_imr_1_set = B_AX_TX_ERR_IMR_SET_V1,
+ .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR_V1,
+ .phy_intf_imr_clr = B_AX_PHYINFO_IMR_CLR_V1,
+ .phy_intf_imr_set = B_AX_PHYINFO_IMR_SET_V1,
+ .rmac_imr_reg = R_AX_RX_ERR_IMR,
+ .rmac_imr_clr = B_AX_RMAC_IMR_CLR_V1,
+ .rmac_imr_set = B_AX_RMAC_IMR_SET_V1,
+ .tmac_imr_reg = R_AX_TRXPTCL_ERROR_INDICA_MASK,
+ .tmac_imr_clr = B_AX_TMAC_IMR_CLR_V1,
+ .tmac_imr_set = B_AX_TMAC_IMR_SET_V1,
+};
+
+static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg);
+
static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
{
u32 val32;
@@ -268,6 +352,41 @@ static void rtw8852c_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
}
}
+static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low)
+{
+ if (high)
+ *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3);
+ if (low)
+ *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3);
+
+ return data != 0xff;
+}
+
+static void rtw8852c_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
+ struct rtw8852c_efuse *map)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ bool valid = false;
+
+ valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]);
+ valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_low,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_high,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
+
+ gain->offset_valid = valid;
+}
+
static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
{
struct rtw89_efuse *efuse = &rtwdev->efuse;
@@ -278,6 +397,7 @@ static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
efuse->country_code[0] = map->country_code[0];
efuse->country_code[1] = map->country_code[1];
rtw8852c_efuse_parsing_tssi(rtwdev, map);
+ rtw8852c_efuse_parsing_gain_offset(rtwdev, map);
switch (rtwdev->hci.type) {
case RTW89_HCI_TYPE_PCIE:
@@ -446,6 +566,1275 @@ static void rtw8852c_power_trim(struct rtw89_dev *rtwdev)
rtw8852c_pa_bias_trim(rtwdev);
}
+static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ u8 mac_idx)
+{
+ u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
+ u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE,
+ mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx);
+ u8 txsc20 = 0, txsc40 = 0, txsc80 = 0;
+ u8 rf_mod_val = 0, chk_rate_mask = 0;
+ u32 txsc;
+
+ switch (param->bandwidth) {
+ case RTW89_CHANNEL_WIDTH_160:
+ txsc80 = rtw89_phy_get_txsc(rtwdev, param,
+ RTW89_CHANNEL_WIDTH_80);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_80:
+ txsc40 = rtw89_phy_get_txsc(rtwdev, param,
+ RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsc20 = rtw89_phy_get_txsc(rtwdev, param,
+ RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (param->bandwidth) {
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_mod_val = AX_WMAC_RFMOD_160M;
+ txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) |
+ FIELD_PREP(B_AX_TXSC_40M_MASK, txsc40) |
+ FIELD_PREP(B_AX_TXSC_80M_MASK, txsc80);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_mod_val = AX_WMAC_RFMOD_80M;
+ txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) |
+ FIELD_PREP(B_AX_TXSC_40M_MASK, txsc40);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_mod_val = AX_WMAC_RFMOD_40M;
+ txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ rf_mod_val = AX_WMAC_RFMOD_20M;
+ txsc = 0;
+ break;
+ }
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, rf_mod_val);
+ rtw89_write32(rtwdev, sub_carr, txsc);
+
+ switch (param->band_type) {
+ case RTW89_BAND_2G:
+ chk_rate_mask = B_AX_BAND_MODE;
+ break;
+ case RTW89_BAND_5G:
+ case RTW89_BAND_6G:
+ chk_rate_mask = B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", param->band_type);
+ return;
+ }
+ rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE | B_AX_CHECK_CCK_EN |
+ B_AX_RTS_LIMIT_IN_OFDM6);
+ rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask);
+}
+
+static const u32 rtw8852c_sco_barker_threshold[14] = {
+ 0x1fe4f, 0x1ff5e, 0x2006c, 0x2017b, 0x2028a, 0x20399, 0x204a8, 0x205b6,
+ 0x206c5, 0x207d4, 0x208e3, 0x209f2, 0x20b00, 0x20d8a
+};
+
+static const u32 rtw8852c_sco_cck_threshold[14] = {
+ 0x2bdac, 0x2bf21, 0x2c095, 0x2c209, 0x2c37e, 0x2c4f2, 0x2c666, 0x2c7db,
+ 0x2c94f, 0x2cac3, 0x2cc38, 0x2cdac, 0x2cf21, 0x2d29e
+};
+
+static int rtw8852c_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 central_ch,
+ u8 primary_ch, enum rtw89_bandwidth bw)
+{
+ u8 ch_element;
+
+ if (bw == RTW89_CHANNEL_WIDTH_20) {
+ ch_element = central_ch - 1;
+ } else if (bw == RTW89_CHANNEL_WIDTH_40) {
+ if (primary_ch == 1)
+ ch_element = central_ch - 1 + 2;
+ else
+ ch_element = central_ch - 1 - 2;
+ } else {
+ rtw89_warn(rtwdev, "Invalid BW:%d for CCK\n", bw);
+ return -EINVAL;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_BK_FC0_INV_V1, B_BK_FC0_INV_MSK_V1,
+ rtw8852c_sco_barker_threshold[ch_element]);
+ rtw89_phy_write32_mask(rtwdev, R_CCK_FC0_INV_V1, B_CCK_FC0_INV_MSK_V1,
+ rtw8852c_sco_cck_threshold[ch_element]);
+
+ return 0;
+}
+
+struct rtw8852c_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8852C];
+ u32 gain_a[BB_PATH_NUM_8852C];
+ u32 gain_mask;
+};
+
+static const struct rtw8852c_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x000000ff },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x0000ff00 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x000000ff },
+};
+
+static const struct rtw8852c_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0xff000000 },
+};
+
+struct rtw8852c_bb_gain_bypass {
+ u32 gain_g[BB_PATH_NUM_8852C];
+ u32 gain_a[BB_PATH_NUM_8852C];
+ u32 gain_mask_g;
+ u32 gain_mask_a;
+};
+
+static
+const struct rtw8852c_bb_gain_bypass bb_gain_bypass_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x4BB8, 0x4C7C}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff000000, .gain_mask_a = 0xff},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff, .gain_mask_a = 0xff00},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff00, .gain_mask_a = 0xff0000},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff0000, .gain_mask_a = 0xff000000},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB8, 0x4C7C},
+ .gain_mask_g = 0xff000000, .gain_mask_a = 0xff},
+ { .gain_g = {0x4BC0, 0x4C84}, .gain_a = {0x4BB8, 0x4C7C},
+ .gain_mask_g = 0xff, .gain_mask_a = 0xff00},
+ { .gain_g = {0x4BC0, 0x4C84}, .gain_a = {0x4BB8, 0x4C7C},
+ .gain_mask_g = 0xff00, .gain_mask_a = 0xff0000},
+};
+
+struct rtw8852c_bb_gain_op1db {
+ struct {
+ u32 lna[BB_PATH_NUM_8852C];
+ u32 tia_lna[BB_PATH_NUM_8852C];
+ u32 mask;
+ } reg[LNA_GAIN_NUM];
+ u32 reg_tia0_lna6[BB_PATH_NUM_8852C];
+ u32 mask_tia0_lna6;
+};
+
+static const struct rtw8852c_bb_gain_op1db bb_gain_op1db_a = {
+ .reg = {
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff},
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff00},
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff0000},
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff000000},
+ { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758},
+ .mask = 0xff},
+ { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758},
+ .mask = 0xff00},
+ { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758},
+ .mask = 0xff0000},
+ },
+ .reg_tia0_lna6 = {0x4674, 0x4758},
+ .mask_tia0_lna6 = 0xff000000,
+};
+
+static enum rtw89_phy_bb_gain_band
+rtw8852c_mapping_gain_band(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_BB_GAIN_BAND_2G;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_BB_GAIN_BAND_5G_L;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_BB_GAIN_BAND_5G_M;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_BB_GAIN_BAND_5G_H;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_BB_GAIN_BAND_6G_L;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_BB_GAIN_BAND_6G_M;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_BB_GAIN_BAND_6G_H;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_BB_GAIN_BAND_6G_UH;
+ }
+}
+
+static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 gain_band = rtw8852c_mapping_gain_band(subband);
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_lna[i].gain_g[path];
+ else
+ reg = bb_gain_lna[i].gain_a[path];
+
+ mask = bb_gain_lna[i].gain_mask;
+ val = gain->lna_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+
+ if (subband == RTW89_CH_2G) {
+ reg = bb_gain_bypass_lna[i].gain_g[path];
+ mask = bb_gain_bypass_lna[i].gain_mask_g;
+ } else {
+ reg = bb_gain_bypass_lna[i].gain_a[path];
+ mask = bb_gain_bypass_lna[i].gain_mask_a;
+ }
+
+ val = gain->lna_gain_bypass[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+
+ if (subband != RTW89_CH_2G) {
+ reg = bb_gain_op1db_a.reg[i].lna[path];
+ mask = bb_gain_op1db_a.reg[i].mask;
+ val = gain->lna_op1db[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+
+ reg = bb_gain_op1db_a.reg[i].tia_lna[path];
+ mask = bb_gain_op1db_a.reg[i].mask;
+ val = gain->tia_lna_op1db[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+ }
+
+ if (subband != RTW89_CH_2G) {
+ reg = bb_gain_op1db_a.reg_tia0_lna6[path];
+ mask = bb_gain_op1db_a.mask_tia0_lna6;
+ val = gain->tia_lna_op1db[gain_band][path][7];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_tia[i].gain_g[path];
+ else
+ reg = bb_gain_tia[i].gain_a[path];
+
+ mask = bb_gain_tia[i].gain_mask;
+ val = gain->tia_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+}
+
+static
+const u8 rtw8852c_ch_base_table[16] = {1, 0xff,
+ 36, 100, 132, 149, 0xff,
+ 1, 33, 65, 97, 129, 161, 193, 225, 0xff};
+#define RTW8852C_CH_BASE_IDX_2G 0
+#define RTW8852C_CH_BASE_IDX_5G_FIRST 2
+#define RTW8852C_CH_BASE_IDX_5G_LAST 5
+#define RTW8852C_CH_BASE_IDX_6G_FIRST 7
+#define RTW8852C_CH_BASE_IDX_6G_LAST 14
+
+#define RTW8852C_CH_BASE_IDX_MASK GENMASK(7, 4)
+#define RTW8852C_CH_OFFSET_MASK GENMASK(3, 0)
+
+static u8 rtw8852c_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
+{
+ u8 chan_idx;
+ u8 last, first;
+ u8 idx;
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ chan_idx = FIELD_PREP(RTW8852C_CH_BASE_IDX_MASK, RTW8852C_CH_BASE_IDX_2G) |
+ FIELD_PREP(RTW8852C_CH_OFFSET_MASK, central_ch);
+ return chan_idx;
+ case RTW89_BAND_5G:
+ first = RTW8852C_CH_BASE_IDX_5G_FIRST;
+ last = RTW8852C_CH_BASE_IDX_5G_LAST;
+ break;
+ case RTW89_BAND_6G:
+ first = RTW8852C_CH_BASE_IDX_6G_FIRST;
+ last = RTW8852C_CH_BASE_IDX_6G_LAST;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Unsupported band %d\n", band);
+ return 0;
+ }
+
+ for (idx = last; idx >= first; idx--)
+ if (central_ch >= rtw8852c_ch_base_table[idx])
+ break;
+
+ if (idx < first) {
+ rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
+ return 0;
+ }
+
+ chan_idx = FIELD_PREP(RTW8852C_CH_BASE_IDX_MASK, idx) |
+ FIELD_PREP(RTW8852C_CH_OFFSET_MASK,
+ (central_ch - rtw8852c_ch_base_table[idx]) >> 1);
+ return chan_idx;
+}
+
+static void rtw8852c_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
+ u8 *ch, enum nl80211_band *band)
+{
+ u8 idx, offset;
+
+ idx = FIELD_GET(RTW8852C_CH_BASE_IDX_MASK, chan_idx);
+ offset = FIELD_GET(RTW8852C_CH_OFFSET_MASK, chan_idx);
+
+ if (idx == RTW8852C_CH_BASE_IDX_2G) {
+ *band = NL80211_BAND_2GHZ;
+ *ch = offset;
+ return;
+ }
+
+ *band = idx <= RTW8852C_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
+ *ch = rtw8852c_ch_base_table[idx] + (offset << 1);
+}
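+
+/* Example of the encode/decode round trip above: 5 GHz channel 157 matches
+ * base entry 5 (channel 149), so encoding yields idx = 5 and
+ * offset = (157 - 149) >> 1 = 4, i.e. chan_idx = 0x54; decoding 0x54 then
+ * returns NL80211_BAND_5GHZ and 149 + (4 << 1) = 157.
+ */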
+
+static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_rf_path path)
+{
+ static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA0_LNA6_OP1DB_V1,
+ R_PATH1_G_TIA0_LNA6_OP1DB_V1};
+ static const u32 rpl_mask[2] = {B_RPL_PATHA_MASK, B_RPL_PATHB_MASK};
+ static const u32 rpl_tb_mask[2] = {B_RSSI_M_PATHA_MASK, B_RSSI_M_PATHB_MASK};
+ struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_band;
+ s32 offset_q0, offset_base_q4;
+ s32 tmp = 0;
+
+ if (!efuse_gain->offset_valid)
+ return;
+
+ if (rtwdev->dbcc_en && path == RF_PATH_B)
+ phy_idx = RTW89_PHY_1;
+
+ if (param->band_type == RTW89_BAND_2G) {
+ offset_q0 = efuse_gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK];
+ offset_base_q4 = efuse_gain->offset_base[phy_idx];
+
+ tmp = clamp_t(s32, (-offset_q0 << 3) + (offset_base_q4 >> 1),
+ S8_MIN >> 1, S8_MAX >> 1);
+ rtw89_phy_write32_mask(rtwdev, R_RPL_OFST, B_RPL_OFST_MASK, tmp & 0x7f);
+ }
+
+ switch (param->subband_type) {
+ default:
+ case RTW89_CH_2G:
+ gain_band = RTW89_GAIN_OFFSET_2G_OFDM;
+ break;
+ case RTW89_CH_5G_BAND_1:
+ gain_band = RTW89_GAIN_OFFSET_5G_LOW;
+ break;
+ case RTW89_CH_5G_BAND_3:
+ gain_band = RTW89_GAIN_OFFSET_5G_MID;
+ break;
+ case RTW89_CH_5G_BAND_4:
+ gain_band = RTW89_GAIN_OFFSET_5G_HIGH;
+ break;
+ }
+
+ offset_q0 = -efuse_gain->offset[path][gain_band];
+ offset_base_q4 = efuse_gain->offset_base[phy_idx];
+
+ tmp = (offset_q0 << 2) + (offset_base_q4 >> 2);
+ tmp = clamp_t(s32, -tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[path], B_PATH0_R_G_OFST_MASK, tmp & 0xff);
+
+ tmp = clamp_t(s32, offset_q0 << 4, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_RPL_PATHAB, rpl_mask[path], tmp & 0xff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSSI_M_PATHAB, rpl_tb_mask[path], tmp & 0xff, phy_idx);
+}
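+
+/* With example (not efuse-derived) values offset_q0 = 2 and
+ * offset_base_q4 = 16, the CCK branch above computes
+ * tmp = (-2 << 3) + (16 >> 1) = -8, which lies inside
+ * [S8_MIN >> 1, S8_MAX >> 1] = [-64, 63] and is written as 0x78
+ * (-8 & 0x7f) to B_RPL_OFST_MASK.
+ */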
+
+static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 sco;
+ u16 central_freq = param->center_freq;
+ u8 central_ch = param->center_chan;
+ u8 band = param->band_type;
+ u8 subband = param->subband_type;
+ bool is_2g = band == RTW89_BAND_2G;
+ u8 chan_idx;
+
+ if (!central_freq) {
+ rtw89_warn(rtwdev, "Invalid central_freq\n");
+ return;
+ }
+
+ if (phy_idx == RTW89_PHY_0) {
+ /* Path A */
+ rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_A);
+ rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_A);
+
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 0,
+ phy_idx);
+ /* Path B */
+ if (!rtwdev->dbcc_en) {
+ rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B);
+
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev,
+ R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev,
+ R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 0, phy_idx);
+ rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL);
+ } else {
+ if (is_2g)
+ rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL);
+ else
+ rtw89_phy_write32_set(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL);
+ }
+ /* SCO compensate FC setting */
+ rtw89_phy_write32_idx(rtwdev, R_FC0_V1, B_FC0_MSK_V1,
+ central_freq, phy_idx);
+ /* sco = round((2^18) / fc0) */
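+ /* e.g. fc0 = 2412 MHz: DIV_ROUND_CLOSEST(262144, 2412) = 109 */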
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, sco,
+ phy_idx);
+ } else {
+ /* Path B */
+ rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B);
+
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 0, phy_idx);
+ /* SCO compensate FC setting */
+ rtw89_phy_write32_idx(rtwdev, R_FC0_V1, B_FC0_MSK_V1,
+ central_freq, phy_idx);
+ /* sco = round((2^18) / fc0) */
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, sco,
+ phy_idx);
+ }
+ /* CCK parameters */
+ if (band == RTW89_BAND_2G) {
+ if (central_ch == 14) {
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF0_V1,
+ B_PCOEFF01_MSK_V1, 0x3b13ff);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF2_V1,
+ B_PCOEFF23_MSK_V1, 0x1c42de);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF4_V1,
+ B_PCOEFF45_MSK_V1, 0xfdb0ad);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF6_V1,
+ B_PCOEFF67_MSK_V1, 0xf60f6e);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF8_V1,
+ B_PCOEFF89_MSK_V1, 0xfd8f92);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFA_V1,
+ B_PCOEFFAB_MSK_V1, 0x2d011);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFC_V1,
+ B_PCOEFFCD_MSK_V1, 0x1c02c);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFE_V1,
+ B_PCOEFFEF_MSK_V1, 0xfff00a);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF0_V1,
+ B_PCOEFF01_MSK_V1, 0x3d23ff);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF2_V1,
+ B_PCOEFF23_MSK_V1, 0x29b354);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF4_V1,
+ B_PCOEFF45_MSK_V1, 0xfc1c8);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF6_V1,
+ B_PCOEFF67_MSK_V1, 0xfdb053);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF8_V1,
+ B_PCOEFF89_MSK_V1, 0xf86f9a);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFA_V1,
+ B_PCOEFFAB_MSK_V1, 0xfaef92);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFC_V1,
+ B_PCOEFFCD_MSK_V1, 0xfe5fcc);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFE_V1,
+ B_PCOEFFEF_MSK_V1, 0xffdff5);
+ }
+ }
+
+ chan_idx = rtw8852c_encode_chan_idx(rtwdev, param->primary_chan, band);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
+}
+
+static void rtw8852c_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ static const u32 adc_sel[2] = {0xC0EC, 0xC1EC};
+ static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to set ADC\n");
+ }
+}
+
+static void rtw8852c_edcca_per20_bitmap_sifs(struct rtw89_dev *rtwdev, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (bw == RTW89_CHANNEL_WIDTH_20) {
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A1, B_SNDCCA_A1_EN, 0xff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A2, B_SNDCCA_A2_VAL, 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A1, B_SNDCCA_A1_EN, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A2, B_SNDCCA_A2_VAL, 0, phy_idx);
+ }
+}
+
+static void
+rtw8852c_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 mod_sbw = 0;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ if (bw == RTW89_CHANNEL_WIDTH_5)
+ mod_sbw = 0x1;
+ else if (bw == RTW89_CHANNEL_WIDTH_10)
+ mod_sbw = 0x2;
+ else if (bw == RTW89_CHANNEL_WIDTH_20)
+ mod_sbw = 0x0;
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW,
+ mod_sbw, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, 0x0,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x2,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xd);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xd);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x3,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xb);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xb);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
+ pri_ch);
+ }
+
+ if (bw == RTW89_CHANNEL_WIDTH_40) {
+ rtw89_phy_write32_idx(rtwdev, R_RX_BW40_2XFFT_EN_V1,
+ B_RX_BW40_2XFFT_EN_MSK_V1, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_T2F_GI_COMB, B_T2F_GI_COMB_EN, 1, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RX_BW40_2XFFT_EN_V1,
+ B_RX_BW40_2XFFT_EN_MSK_V1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_T2F_GI_COMB, B_T2F_GI_COMB_EN, 0, phy_idx);
+ }
+
+ if (phy_idx == RTW89_PHY_0) {
+ rtw8852c_bw_setting(rtwdev, bw, RF_PATH_A);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_bw_setting(rtwdev, bw, RF_PATH_B);
+ } else {
+ rtw8852c_bw_setting(rtwdev, bw, RF_PATH_B);
+ }
+
+ rtw8852c_edcca_per20_bitmap_sifs(rtwdev, bw, phy_idx);
+}
+
+static u32 rtw8852c_spur_freq(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param)
+{
+ u8 center_chan = param->center_chan;
+ u8 bw = param->bandwidth;
+
+ switch (param->band_type) {
+ case RTW89_BAND_2G:
+ if (bw == RTW89_CHANNEL_WIDTH_20) {
+ if (center_chan >= 5 && center_chan <= 8)
+ return 2440;
+ if (center_chan == 13)
+ return 2480;
+ } else if (bw == RTW89_CHANNEL_WIDTH_40) {
+ if (center_chan >= 3 && center_chan <= 10)
+ return 2440;
+ }
+ break;
+ case RTW89_BAND_5G:
+ if (center_chan == 151 || center_chan == 153 ||
+ center_chan == 155 || center_chan == 163)
+ return 5760;
+ break;
+ case RTW89_BAND_6G:
+ if (center_chan == 195 || center_chan == 197 ||
+ center_chan == 199 || center_chan == 207)
+ return 6920;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
+static void rtw8852c_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 spur_freq;
+ s32 freq_diff, csi_idx, csi_tone_idx;
+
+ spur_freq = rtw8852c_spur_freq(rtwdev, param);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 0, phy_idx);
+ return;
+ }
+
+ freq_diff = (spur_freq - param->center_freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 1, phy_idx);
+}
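+
+/* For a 20 MHz channel 6 setup (center_freq 2437, spur at 2440 MHz per
+ * rtw8852c_spur_freq()), freq_diff is 3000000 Hz, so csi_idx becomes
+ * 3000000 / 78125 = 38.4 rounded to 38 and, assuming the
+ * s32_div_u32_round_down() helper stores the modulo-2048 remainder through
+ * its last argument, csi_tone_idx is 38 as well.
+ */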
+
+static const struct rtw89_nbi_reg_def rtw8852c_nbi_reg_def[] = {
+ [RF_PATH_A] = {
+ .notch1_idx = {0x4C14, 0xFF},
+ .notch1_frac_idx = {0x4C14, 0xC00},
+ .notch1_en = {0x4C14, 0x1000},
+ .notch2_idx = {0x4C20, 0xFF},
+ .notch2_frac_idx = {0x4C20, 0xC00},
+ .notch2_en = {0x4C20, 0x1000},
+ },
+ [RF_PATH_B] = {
+ .notch1_idx = {0x4CD8, 0xFF},
+ .notch1_frac_idx = {0x4CD8, 0xC00},
+ .notch1_en = {0x4CD8, 0x1000},
+ .notch2_idx = {0x4CE4, 0xFF},
+ .notch2_frac_idx = {0x4CE4, 0xC00},
+ .notch2_en = {0x4CE4, 0x1000},
+ },
+};
+
+static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_nbi_reg_def *nbi = &rtw8852c_nbi_reg_def[path];
+ u32 spur_freq, fc;
+ s32 freq_diff;
+ s32 nbi_idx, nbi_tone_idx;
+ s32 nbi_frac_idx, nbi_frac_tone_idx;
+ bool notch2_chk = false;
+
+ spur_freq = rtw8852c_spur_freq(rtwdev, param);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0);
+ return;
+ }
+
+ fc = param->center_freq;
+ if (param->bandwidth == RTW89_CHANNEL_WIDTH_160) {
+ fc = (spur_freq > fc) ? fc + 40 : fc - 40;
+ if ((fc > spur_freq && param->center_chan < param->primary_chan) ||
+ (fc < spur_freq && param->center_chan > param->primary_chan))
+ notch2_chk = true;
+ }
+
+ freq_diff = (spur_freq - fc) * 1000000;
+ nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5, &nbi_frac_idx);
+
+ if (param->bandwidth == RTW89_CHANNEL_WIDTH_20) {
+ s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
+ } else {
+ u16 tone_para = (param->bandwidth == RTW89_CHANNEL_WIDTH_40) ? 128 : 256;
+
+ s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
+ }
+ nbi_frac_tone_idx = s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
+
+ if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_idx.addr,
+ nbi->notch2_idx.mask, nbi_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_frac_idx.addr,
+ nbi->notch2_frac_idx.mask, nbi_frac_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 1);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_idx.addr,
+ nbi->notch1_idx.mask, nbi_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_frac_idx.addr,
+ nbi->notch1_frac_idx.mask, nbi_frac_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 1);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0);
+ }
+}
+
+static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_set_csi_tone_idx(rtwdev, param, phy_idx);
+
+ if (phy_idx == RTW89_PHY_0) {
+ rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B);
+ } else {
+ rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B);
+ }
+}
+
+static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 pri_ch = param->primary_chan;
+ bool mask_5m_low;
+ bool mask_5m_en;
+
+ switch (param->bandwidth) {
+ case RTW89_CHANNEL_WIDTH_40:
+ mask_5m_en = true;
+ mask_5m_low = pri_ch == 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ mask_5m_en = ((pri_ch == 3) || (pri_ch == 4));
+ mask_5m_low = pri_ch == 4;
+ break;
+ default:
+ mask_5m_en = false;
+ mask_5m_low = false;
+ break;
+ }
+
+ if (!mask_5m_en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT,
+ B_ASSIGN_SBD_OPT_EN, 0x0, phy_idx);
+ } else {
+ if (mask_5m_low) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB0, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB0, 0x0);
+ }
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT, B_ASSIGN_SBD_OPT_EN, 0x1, phy_idx);
+ }
+}
+
+static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ /*HW SI reset*/
+ rtw89_phy_write32_mask(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG,
+ 0x7);
+ rtw89_phy_write32_mask(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG,
+ 0x7);
+
+ udelay(1);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0,
+ phy_idx);
+ /*HW SI reset*/
+ rtw89_phy_write32_mask(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG,
+ 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG,
+ 0x0);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+}
+
+static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, bool en)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+ if (hal->current_band_type == RTW89_BAND_2G)
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0,
+ phy_idx);
+ }
+}
+
+static void rtw8852c_bb_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_bb_reset_all(rtwdev, phy_idx);
+}
+
+static
+void rtw8852c_bb_gpio_trsw(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 tx_path_en, u8 trsw_tx,
+ u8 trsw_rx, u8 trsw, u8 trsw_b)
+{
+ static const u32 path_cr_bases[] = {0x5868, 0x7868};
+ u32 mask_ofst = 16;
+ u32 cr;
+ u32 val;
+
+ if (path >= ARRAY_SIZE(path_cr_bases))
+ return;
+
+ cr = path_cr_bases[path];
+
+ mask_ofst += (tx_path_en * 4 + trsw_tx * 2 + trsw_rx) * 2;
+ val = FIELD_PREP(B_P0_TRSW_A, trsw) | FIELD_PREP(B_P0_TRSW_B, trsw_b);
+
+ rtw89_phy_write32_mask(rtwdev, cr, (B_P0_TRSW_A | B_P0_TRSW_B) << mask_ofst, val);
+}
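+
+/* Each (tx_path_en, trsw_tx, trsw_rx) combination selects its own two-bit
+ * TRSW_A/TRSW_B slot in the per-path control register, starting at bit 16;
+ * e.g. (1, 0, 1) maps to mask_ofst = 16 + (1 * 4 + 0 * 2 + 1) * 2 = 26.
+ */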
+
+enum rtw8852c_rfe_src {
+ PAPE_RFM,
+ TRSW_RFM,
+ LNAON_RFM,
+};
+
+static
+void rtw8852c_bb_gpio_rfm(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw8852c_rfe_src src, u8 dis_tx_gnt_wl,
+ u8 active_tx_opt, u8 act_bt_en, u8 rfm_output_val)
+{
+ static const u32 path_cr_bases[] = {0x5894, 0x7894};
+ static const u32 masks[] = {0, 8, 16};
+ u32 mask, mask_ofst;
+ u32 cr;
+ u32 val;
+
+ if (src >= ARRAY_SIZE(masks) || path >= ARRAY_SIZE(path_cr_bases))
+ return;
+
+ mask_ofst = masks[src];
+ cr = path_cr_bases[path];
+
+ val = FIELD_PREP(B_P0_RFM_DIS_WL, dis_tx_gnt_wl) |
+ FIELD_PREP(B_P0_RFM_TX_OPT, active_tx_opt) |
+ FIELD_PREP(B_P0_RFM_BT_EN, act_bt_en) |
+ FIELD_PREP(B_P0_RFM_OUT, rfm_output_val);
+ mask = 0xff << mask_ofst;
+
+ rtw89_phy_write32_mask(rtwdev, cr, mask, val);
+}
+
+static void rtw8852c_bb_gpio_init(struct rtw89_dev *rtwdev)
+{
+ static const u32 cr_bases[] = {0x5800, 0x7800};
+ u32 addr;
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(cr_bases); i++) {
+ addr = cr_bases[i];
+ rtw89_phy_write32_set(rtwdev, (addr | 0x68), B_P0_TRSW_A);
+ rtw89_phy_write32_clr(rtwdev, (addr | 0x68), B_P0_TRSW_X);
+ rtw89_phy_write32_clr(rtwdev, (addr | 0x68), B_P0_TRSW_SO_A2);
+ rtw89_phy_write32(rtwdev, (addr | 0x80), 0x77777777);
+ rtw89_phy_write32(rtwdev, (addr | 0x84), 0x77777777);
+ }
+
+ rtw89_phy_write32(rtwdev, R_RFE_E_A2, 0xffffffff);
+ rtw89_phy_write32(rtwdev, R_RFE_O_SEL_A2, 0);
+ rtw89_phy_write32(rtwdev, R_RFE_SEL0_A2, 0);
+ rtw89_phy_write32(rtwdev, R_RFE_SEL32_A2, 0);
+
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 1, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 1, 1, 1, 0);
+
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 1, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 1, 1, 1, 0);
+
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, PAPE_RFM, 0, 0, 0, 0x0);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, TRSW_RFM, 0, 0, 0, 0x4);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, LNAON_RFM, 0, 0, 0, 0x8);
+
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, PAPE_RFM, 0, 0, 0, 0x0);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, TRSW_RFM, 0, 0, 0, 0x4);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, LNAON_RFM, 0, 0, 0, 0x8);
+}
+
+static void rtw8852c_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 addr;
+
+ for (addr = R_AX_PWR_MACID_LMT_TABLE0;
+ addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+}
+
+static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT,
+ B_DBCC_80P80_SEL_EVM_RPT_EN);
+ rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT2,
+ B_DBCC_80P80_SEL_EVM_RPT2_EN);
+
+ rtw8852c_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
+ rtw8852c_bb_gpio_init(rtwdev);
+
+ /* read these registers after loading BB parameters */
+ gain->offset_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_RPL_BIAS_COMP, B_RPL_BIAS_COMP_MASK);
+ gain->offset_base[RTW89_PHY_1] =
+ rtw89_phy_read32_mask(rtwdev, R_RPL_BIAS_COMP1, B_RPL_BIAS_COMP1_MASK);
+}
+
+static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx)
+{
+ bool cck_en = param->band_type == RTW89_BAND_2G;
+ u8 pri_ch_idx = param->pri_ch_idx;
+ u32 mask, reg;
+ u32 ru_alloc_msk[2] = {B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0,
+ B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1};
+
+ if (param->band_type == RTW89_BAND_2G)
+ rtw8852c_ctrl_sco_cck(rtwdev, param->center_chan,
+ param->primary_chan, param->bandwidth);
+
+ rtw8852c_ctrl_ch(rtwdev, param, phy_idx);
+ rtw8852c_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx);
+ if (cck_en) {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF,
+ B_PD_ARBITER_OFF, 0x0, phy_idx);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 1);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF,
+ B_PD_ARBITER_OFF, 0x1, phy_idx);
+ }
+
+ rtw8852c_spur_elimination(rtwdev, param, phy_idx);
+ rtw8852c_ctrl_btg(rtwdev, param->band_type == RTW89_BAND_2G);
+ rtw8852c_5m_mask(rtwdev, param, phy_idx);
+
+ if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ rtwdev->hal.cv != CHIP_CAV) {
+ rtw89_phy_write32_idx(rtwdev, R_P80_AT_HIGH_FREQ,
+ B_P80_AT_HIGH_FREQ, 0x0, phy_idx);
+ reg = rtw89_mac_reg_by_idx(R_P80_AT_HIGH_FREQ_BB_WRP,
+ phy_idx);
+ if (param->primary_chan > param->center_chan) {
+ rtw89_phy_write32_mask(rtwdev,
+ R_P80_AT_HIGH_FREQ_RU_ALLOC,
+ ru_alloc_msk[phy_idx], 1);
+ rtw89_write32_mask(rtwdev, reg,
+ B_P80_AT_HIGH_FREQ_BB_WRP, 1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev,
+ R_P80_AT_HIGH_FREQ_RU_ALLOC,
+ ru_alloc_msk[phy_idx], 0);
+ rtw89_write32_mask(rtwdev, reg,
+ B_P80_AT_HIGH_FREQ_BB_WRP, 0);
+ }
+ }
+
+ if (param->band_type == RTW89_BAND_6G &&
+ param->bandwidth == RTW89_CHANNEL_WIDTH_160)
+ rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN,
+ B_CDD_EVM_CHK_EN, 0, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN,
+ B_CDD_EVM_CHK_EN, 1, phy_idx);
+
+ if (!rtwdev->dbcc_en) {
+ mask = B_P0_TXPW_RSTB_TSSI | B_P0_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x3);
+ mask = B_P1_TXPW_RSTB_TSSI | B_P1_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x3);
+ } else {
+ if (phy_idx == RTW89_PHY_0) {
+ mask = B_P0_TXPW_RSTB_TSSI | B_P0_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x3);
+ } else {
+ mask = B_P1_TXPW_RSTB_TSSI | B_P1_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x3);
+ }
+ }
+
+ rtw8852c_bb_reset_all(rtwdev, phy_idx);
+}
+
+static void rtw8852c_set_channel(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *params)
+{
+ rtw8852c_set_channel_mac(rtwdev, params, RTW89_MAC_0);
+ rtw8852c_set_channel_bb(rtwdev, params, RTW89_PHY_0);
+ rtw8852c_set_channel_rf(rtwdev, params, RTW89_PHY_0);
+}
+
+static void rtw8852c_dfs_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 0);
+}
+
+static void rtw8852c_adc_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST,
+ 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST,
+ 0xf);
+}
+
+static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p)
+{
+ u8 phy_idx = RTW89_PHY_0;
+
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw8852c_dfs_en(rtwdev, false);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852c_adc_en(rtwdev, false);
+ fsleep(40);
+ rtw8852c_bb_reset_en(rtwdev, phy_idx, false);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw8852c_adc_en(rtwdev, true);
+ rtw8852c_dfs_en(rtwdev, true);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852c_bb_reset_en(rtwdev, phy_idx, true);
+ rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ }
+}
+
+static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(mcc_info, 0, sizeof(*mcc_info));
+ rtw8852c_lck_init(rtwdev);
+
+ rtw8852c_rck(rtwdev);
+ rtw8852c_dack(rtwdev);
+ rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
+}
+
+static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+
+ rtw8852c_rx_dck(rtwdev, phy_idx, false);
+ rtw8852c_iqk(rtwdev, phy_idx);
+ rtw8852c_tssi(rtwdev, phy_idx);
+ rtw8852c_dpk(rtwdev, phy_idx);
+ rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
+}
+
+static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_tssi_scan(rtwdev, RTW89_PHY_0);
+}
+
+static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+ rtw8852c_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+}
+
+static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_dpk_track(rtwdev);
+ rtw8852c_lck_track(rtwdev);
+}
+
+static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, s16 ref)
+{
+ s8 ofst_int = 0;
+ u8 base_cw_0db = 0x27;
+ u16 tssi_16dbm_cw = 0x12c;
+ s16 pwr_s10_3 = 0;
+ s16 rf_pwr_cw = 0;
+ u16 bb_pwr_cw = 0;
+ u32 pwr_cw = 0;
+ u32 tssi_ofst_cw = 0;
+
+ pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
+ rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
+ rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
+ pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
+
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
+ tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
+
+ return (tssi_ofst_cw << 18) | (pwr_cw << 9) | (ref & GENMASK(8, 0));
+}
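+
+/* With ref = 0 (the value used by rtw8852c_set_txpwr_ref() below),
+ * pwr_s10_3 = 0x27 << 3 = 0x138, so bb_pwr_cw = 0x0, rf_pwr_cw = 0x27
+ * (already inside the 15..63 clamp), pwr_cw = 0x138 and
+ * tssi_ofst_cw = 0x12c - 128 = 0xac, giving a composed word of
+ * (0xac << 18) | (0x138 << 9) | 0 = 0x02b27000.
+ */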
+
static
void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
s8 pw_ofst, enum rtw89_mac_idx mac_idx)
@@ -481,29 +1870,932 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
}
}
+static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 addr[RF_PATH_NUM_8852C] = {0x5800, 0x7800};
+ const u32 mask = 0x7FFFFFF;
+ const u8 ofst_ofdm = 0x4;
+ const u8 ofst_cck = 0x8;
+ s16 ref_ofdm = 0;
+ s16 ref_cck = 0;
+ u32 val;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
+ GENMASK(27, 10), 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
+ phy_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
+ phy_idx);
+}
+
+static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ch = rtwdev->hal.current_channel;
+ static const u8 rs[] = {
+ RTW89_RS_CCK,
+ RTW89_RS_OFDM,
+ RTW89_RS_MCS,
+ RTW89_RS_HEDCM,
+ };
+ s8 tmp;
+ u8 i, j;
+ u32 val, shf, addr = R_AX_PWR_BY_RATE;
+ struct rtw89_rate_desc cur;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr byrate with ch=%d\n", ch);
+
+ for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
+ for (i = 0; i < ARRAY_SIZE(rs); i++) {
+ if (cur.nss >= rtw89_rs_nss_max[rs[i]])
+ continue;
+
+ val = 0;
+ cur.rs = rs[i];
+
+ for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
+ cur.idx = j;
+ shf = (j % 4) * 8;
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur);
+ val |= (tmp << shf);
+
+ if ((j + 1) % 4)
+ continue;
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ val = 0;
+ addr += 4;
+ }
+ }
+ }
+}
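+
+/* The by-rate values are packed four s8 entries per 32-bit word: shf walks
+ * 0/8/16/24 within a word and addr advances by 4 after every fourth rate
+ * index, so e.g. rate indices 0-3 of a rate section share one
+ * R_AX_PWR_BY_RATE word and indices 4-7 the next one.
+ */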
+
+static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rate_desc desc = {
+ .nss = RTW89_NSS_1,
+ .rs = RTW89_RS_OFFSET,
+ };
+ u32 val = 0;
+ s8 v;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
+
+ for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc);
+ val |= ((v & 0xf) << (4 * desc.idx));
+ }
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
+ GENMASK(19, 0), val);
+}
+
+static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ u8 tx_shape_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __DFIR_CFG_MASK 0xffffff
+#define __DFIR_CFG_NR 8
+#define __DECL_DFIR_VAR(_prefix, _name, _val...) \
+ static const u32 _prefix ## _ ## _name[] = {_val}; \
+ static_assert(ARRAY_SIZE(_prefix ## _ ## _name) == __DFIR_CFG_NR)
+#define __DECL_DFIR_PARAM(_name, _val...) __DECL_DFIR_VAR(param, _name, _val)
+#define __DECL_DFIR_ADDR(_name, _val...) __DECL_DFIR_VAR(addr, _name, _val)
+
+ __DECL_DFIR_PARAM(flat,
+ 0x003D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053,
+ 0x00F86F9A, 0x00FAEF92, 0x00FE5FCC, 0x00FFDFF5);
+ __DECL_DFIR_PARAM(sharp,
+ 0x003D83FF, 0x002C636A, 0x0013F204, 0x00008090,
+ 0x00F87FB0, 0x00F99F83, 0x00FDBFBA, 0x00003FF5);
+ __DECL_DFIR_PARAM(sharp_14,
+ 0x003B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
+ 0x00FD8F92, 0x0002D011, 0x0001C02C, 0x00FFF00A);
+ __DECL_DFIR_ADDR(filter,
+ 0x45BC, 0x45CC, 0x45D0, 0x45D4, 0x45D8, 0x45C0,
+ 0x45C4, 0x45C8);
+ u8 ch = rtwdev->hal.current_channel;
+ const u32 *param;
+ int i;
+
+ if (ch > 14) {
+ rtw89_warn(rtwdev,
+ "set tx shape dfir by unknown ch: %d on 2G\n", ch);
+ return;
+ }
+
+ if (ch == 14)
+ param = param_sharp_14;
+ else
+ param = tx_shape_idx == 0 ? param_flat : param_sharp;
+
+ for (i = 0; i < __DFIR_CFG_NR; i++) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "set tx shape dfir: 0x%x: 0x%x\n", addr_filter[i],
+ param[i]);
+ rtw89_phy_write32_idx(rtwdev, addr_filter[i], __DFIR_CFG_MASK,
+ param[i], phy_idx);
+ }
+
+#undef __DECL_DFIR_ADDR
+#undef __DECL_DFIR_PARAM
+#undef __DECL_DFIR_VAR
+#undef __DFIR_CFG_NR
+#undef __DFIR_CFG_MASK
+}
+
+static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 band = rtwdev->hal.current_band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ u8 tx_shape_cck = rtw89_8852c_tx_shape[band][RTW89_RS_CCK][regd];
+ u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd];
+
+ if (band == RTW89_BAND_2G)
+ rtw8852c_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx);
+
+ rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev,
+ (enum rtw89_mac_idx)phy_idx,
+ tx_shape_ofdm);
+}
+
+static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __MAC_TXPWR_LMT_PAGE_SIZE 40
+ u8 ch = rtwdev->hal.current_channel;
+ u8 bw = rtwdev->hal.current_band_width;
+ struct rtw89_txpwr_limit lmt[NTX_NUM_8852C];
+ u32 addr, val;
+ const s8 *ptr;
+ u8 i, j, k;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
+
+ for (i = 0; i < NTX_NUM_8852C; i++) {
+ rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i);
+
+ for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
+ addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
+ ptr = (s8 *)&lmt[i] + j;
+ val = 0;
+
+ for (k = 0; k < 4; k++)
+ val |= (ptr[k] << (8 * k));
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+#undef __MAC_TXPWR_LMT_PAGE_SIZE
+}
+
+static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
+ u8 ch = rtwdev->hal.current_channel;
+ u8 bw = rtwdev->hal.current_band_width;
+ struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852C];
+ u32 addr, val;
+ const s8 *ptr;
+ u8 i, j, k;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
+
+ for (i = 0; i < NTX_NUM_8852C; i++) {
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i);
+
+ for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
+ addr = R_AX_PWR_RU_LMT + j +
+ __MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
+ ptr = (s8 *)&lmt_ru[i] + j;
+ val = 0;
+
+ for (k = 0; k < 4; k++)
+ val |= (ptr[k] << (8 * k));
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+
+#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
+}
+
+static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_set_txpwr_byrate(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_txpwr_offset(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_tx_shape(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_txpwr_limit(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0);
+}
+
+static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_set_txpwr_ref(rtwdev, RTW89_PHY_0);
+}
+
+static void
+rtw8852c_init_tssi_ctrl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ static const struct rtw89_reg2_def ctrl_ini[] = {
+ {0xD938, 0x00010100},
+ {0xD93C, 0x0500D500},
+ {0xD940, 0x00000500},
+ {0xD944, 0x00000005},
+ {0xD94C, 0x00220000},
+ {0xD950, 0x00030000},
+ };
+ u32 addr;
+ int i;
+
+ for (addr = R_AX_TSSI_CTRL_HEAD; addr <= R_AX_TSSI_CTRL_TAIL; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+
+ for (i = 0; i < ARRAY_SIZE(ctrl_ini); i++)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, ctrl_ini[i].addr,
+ ctrl_ini[i].data);
+
+ rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev,
+ (enum rtw89_mac_idx)phy_idx,
+ RTW89_TSSI_BANDEDGE_FLAT);
+}
+
+static int
+rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ int ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
+ if (ret)
+ return ret;
+
+ rtw8852c_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
+ RTW89_MAC_1 :
+ RTW89_MAC_0);
+ rtw8852c_init_tssi_ctrl(rtwdev, phy_idx);
+
+ return 0;
+}
+
+static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
+ u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
+
+ if (rtwdev->dbcc_en) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_ANT_RX_SEG0, 1);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_ANT_RX_SEG0, 2,
+ RTW89_PHY_1);
+
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG0,
+ 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG1,
+ 1);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG0, 2,
+ RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG1, 2,
+ RTW89_PHY_1);
+
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 8);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+
+ rtw89_phy_write32_idx(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0, RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0, RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHE_USER_MAX, 1,
+ RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0,
+ RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0,
+ RTW89_PHY_1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
+ } else {
+ if (rx_path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD,
+ B_ANT_RX_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 3);
+ } else if (rx_path == RF_PATH_B) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD,
+ B_ANT_RX_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG1, 2);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 3);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD,
+ B_ANT_RX_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG1, 3);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS,
+ 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
+ 1);
+ rtw8852c_ctrl_btg(rtwdev, hal->current_band_type == RTW89_BAND_2G);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 3);
+ }
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 8);
+ }
+}
+
+static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path,
+ enum rtw89_mac_idx mac_idx)
+{
+ struct rtw89_reg2_def path_com[] = {
+ {R_AX_PATH_COM0, AX_PATH_COM0_DFVAL},
+ {R_AX_PATH_COM1, AX_PATH_COM1_DFVAL},
+ {R_AX_PATH_COM2, AX_PATH_COM2_DFVAL},
+ {R_AX_PATH_COM3, AX_PATH_COM3_DFVAL},
+ {R_AX_PATH_COM4, AX_PATH_COM4_DFVAL},
+ {R_AX_PATH_COM5, AX_PATH_COM5_DFVAL},
+ {R_AX_PATH_COM6, AX_PATH_COM6_DFVAL},
+ {R_AX_PATH_COM7, AX_PATH_COM7_DFVAL},
+ {R_AX_PATH_COM8, AX_PATH_COM8_DFVAL},
+ {R_AX_PATH_COM9, AX_PATH_COM9_DFVAL},
+ {R_AX_PATH_COM10, AX_PATH_COM10_DFVAL},
+ {R_AX_PATH_COM11, AX_PATH_COM11_DFVAL},
+ };
+ u32 addr;
+ u32 reg;
+ u8 cr_size = ARRAY_SIZE(path_com);
+ u8 i = 0;
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, RTW89_PHY_1);
+
+ for (addr = R_AX_MACID_ANT_TABLE;
+ addr <= R_AX_MACID_ANT_TABLE_LAST; addr += 4) {
+ reg = rtw89_mac_reg_by_idx(addr, mac_idx);
+ rtw89_write32(rtwdev, reg, 0);
+ }
+
+ if (tx_path == RF_PATH_A) {
+ path_com[0].data = AX_PATH_COM0_PATHA;
+ path_com[1].data = AX_PATH_COM1_PATHA;
+ path_com[2].data = AX_PATH_COM2_PATHA;
+ path_com[7].data = AX_PATH_COM7_PATHA;
+ path_com[8].data = AX_PATH_COM8_PATHA;
+ } else if (tx_path == RF_PATH_B) {
+ path_com[0].data = AX_PATH_COM0_PATHB;
+ path_com[1].data = AX_PATH_COM1_PATHB;
+ path_com[2].data = AX_PATH_COM2_PATHB;
+ path_com[7].data = AX_PATH_COM7_PATHB;
+ path_com[8].data = AX_PATH_COM8_PATHB;
+ } else if (tx_path == RF_PATH_AB) {
+ path_com[0].data = AX_PATH_COM0_PATHAB;
+ path_com[1].data = AX_PATH_COM1_PATHAB;
+ path_com[2].data = AX_PATH_COM2_PATHAB;
+ path_com[7].data = AX_PATH_COM7_PATHAB;
+ path_com[8].data = AX_PATH_COM8_PATHAB;
+ } else {
+ rtw89_warn(rtwdev, "[Invalid Tx Path]Tx Path: %d\n", tx_path);
+ return;
+ }
+
+ for (i = 0; i < cr_size; i++) {
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "0x%x = 0x%x\n",
+ path_com[i].addr, path_com[i].data);
+ reg = rtw89_mac_reg_by_idx(path_com[i].addr, mac_idx);
+ rtw89_write32(rtwdev, reg, path_com[i].data);
+ }
+}
+
+static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw8852c_bb_cfg_rx_path(rtwdev, RF_PATH_AB);
+
+ if (hal->rx_nss == 1) {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+
+ rtw8852c_ctrl_tx_path_tmac(rtwdev, RF_PATH_AB, RTW89_MAC_0);
+}
+
+static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
+}
+
+static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+
+ module->rfe_type = rtwdev->efuse.rfe_type;
+ module->cv = rtwdev->hal.cv;
+ module->bt_solo = 0;
+ module->switch_type = BTC_SWITCH_INTERNAL;
+
+ if (module->rfe_type > 0)
+ module->ant.num = (module->rfe_type % 2 ? 2 : 3);
+ else
+ module->ant.num = 2;
+
+ module->ant.diversity = 0;
+ module->ant.isolation = 10;
+
+ if (module->ant.num == 3) {
+ module->ant.type = BTC_ANT_DEDICATED;
+ module->bt_pos = BTC_BT_ALONE;
+ } else {
+ module->ant.type = BTC_ANT_SHARED;
+ module->bt_pos = BTC_BT_BTG;
+ }
+}
+
+static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
+{
+ if (btg) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x20);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_BT_SHARE, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_BT_SEG0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P2, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN,
+ B_BT_DYN_DC_EST_EN_MSK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x0);
+ }
+}
+
+static
+void rtw8852c_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_mac_ax_coex coex_params = {
+ .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
+ .direction = RTW89_MAC_AX_COEX_INNER,
+ };
+
+ /* PTA init */
+ rtw89_mac_coex_init_v1(rtwdev, &coex_params);
+
+ /* set WL Tx response = Hi-Pri */
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+
+ /* set rf gnt debug off */
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
+
+ /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
+ if (module->ant.type == BTC_ANT_SHARED) {
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
+ }
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_AX_BT_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* enable BT counter 0xda10[1:0] = 2'b11 */
+ rtw89_write32_set(rtwdev,
+ R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN |
+ B_AX_BT_CNT_RST_V1);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static
+void rtw8852c_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ u32 bitmap = 0;
+ u32 reg = 0;
+
+ switch (map) {
+ case BTC_PRI_MASK_TX_RESP:
+ reg = R_BTC_COEX_WL_REQ;
+ bitmap = B_BTC_RSP_ACK_HI;
+ break;
+ case BTC_PRI_MASK_BEACON:
+ reg = R_BTC_COEX_WL_REQ;
+ bitmap = B_BTC_TX_BCN_HI;
+ break;
+ default:
+ return;
+ }
+
+ if (state)
+ rtw89_write32_set(rtwdev, reg, bitmap);
+ else
+ rtw89_write32_clr(rtwdev, reg, bitmap);
+}
+
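+/* txpwr_val packs two 16-bit controls for rtw8852c_btc_set_wl_txpwr_ctrl():
+ * ctrl_all_time (forced TX power by rate) and ctrl_gnt_bt (TX AGC value
+ * applied for GNT_BT). Each half carries a 9-bit signed power value; a raw
+ * 0xffff leaves the corresponding control disabled.
+ */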
+union rtw8852c_btc_wl_txpwr_ctrl {
+ u32 txpwr_val;
+ struct {
+ union {
+ u16 ctrl_all_time;
+ struct {
+ s16 data:9;
+ u16 rsvd:6;
+ u16 flag:1;
+ } all_time;
+ };
+ union {
+ u16 ctrl_gnt_bt;
+ struct {
+ s16 data:9;
+ u16 rsvd:7;
+ } gnt_bt;
+ };
+ };
+} __packed;
+
+static void
+rtw8852c_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ union rtw8852c_btc_wl_txpwr_ctrl arg = { .txpwr_val = txpwr_val };
+ s32 val;
+
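+/* Helper: program _val into the _msk field of _reg and set or clear the _en
+ * bit in the same masked write depending on _cond; _msk and _en must not
+ * overlap.
+ */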
+#define __write_ctrl(_reg, _msk, _val, _en, _cond) \
+do { \
+ const typeof(_msk) __msk = _msk; \
+ const typeof(_en) __en = _en; \
+ u32 _wrt = FIELD_PREP(__msk, _val); \
+ BUILD_BUG_ON((__msk & __en) != 0); \
+ if (_cond) \
+ _wrt |= __en; \
+ else \
+ _wrt &= ~__en; \
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, _reg, \
+ __msk | __en, _wrt); \
+} while (0)
+
+ switch (arg.ctrl_all_time) {
+ case 0xffff:
+ val = 0;
+ break;
+ default:
+ val = arg.all_time.data;
+ break;
+ }
+
+ __write_ctrl(R_AX_PWR_RATE_CTRL, B_AX_FORCE_PWR_BY_RATE_VALUE_MASK,
+ val, B_AX_FORCE_PWR_BY_RATE_EN,
+ arg.ctrl_all_time != 0xffff);
+
+ switch (arg.ctrl_gnt_bt) {
+ case 0xffff:
+ val = 0;
+ break;
+ default:
+ val = arg.gnt_bt.data;
+ break;
+ }
+
+ __write_ctrl(R_AX_PWR_COEXT_CTRL, B_AX_TXAGC_BT_MASK, val,
+ B_AX_TXAGC_BT_EN, arg.ctrl_gnt_bt != 0xffff);
+
+#undef __write_ctrl
+}
+
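+/* Map the reported BT RSSI, clamped to [-100, 0], onto a 0..100 scale. */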
+static
+s8 rtw8852c_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ return clamp_t(s8, val, -100, 0) + 100;
+}
+
+static
+void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
+
+ /* fix LNA2 = level-5 for BT ACI issue at BTG */
+ if (btc->dm.wl_btg_rx && b->profile_cnt.now != 0)
+ dm->trx_para_level = 1;
+}
+
+static
+void rtw8852c_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ u32 val;
+
+ val = rtw89_read32(rtwdev, R_BTC_BT_CNT_HIGH);
+ cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val);
+ cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val);
+
+ val = rtw89_read32(rtwdev, R_BTC_BT_CNT_LOW);
+ cx->cnt_bt[BTC_BCNT_LOPRI_TX] = FIELD_GET(B_AX_STATIS_BT_LO_TX_1_MASK, val);
+ cx->cnt_bt[BTC_BCNT_LOPRI_RX] = FIELD_GET(B_AX_STATIS_BT_LO_RX_1_MASK, val);
+
+ /* clock-gate off before resetting the counter */
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
+ rtw89_write32_clr(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST);
+ rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST);
+ rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
+}
+
+static
+void rtw8852c_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x620);
+
+ /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
+ if (state)
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0,
+ RFREG_MASK, 0x179c);
+ else
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0,
+ RFREG_MASK, 0x208);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852c_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 chan_idx = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
+ if (chan_idx == 0)
+ return;
+
+ rtw8852c_decode_chan_idx(rtwdev, chan_idx, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ s8 *rx_power = phy_ppdu->rssi;
+
+ status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = rx_power[path];
+ }
+ if (phy_ppdu->valid)
+ rtw8852c_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+
+ rtw89_write32_mask(rtwdev, R_AX_AFE_OFF_CTRL1, B_AX_S0_LDO_VSEL_F_MASK, 0x1);
+ rtw89_write32_mask(rtwdev, R_AX_AFE_OFF_CTRL1, B_AX_S1_LDO_VSEL_F_MASK, 0x1);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL0, 0x7, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x6c, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xc7, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xc7, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL3, 0xd, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+}
+
static const struct rtw89_chip_ops rtw8852c_chip_ops = {
+ .enable_bb_rf = rtw8852c_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852c_mac_disable_bb_rf,
+ .bb_reset = rtw8852c_bb_reset,
+ .bb_sethw = rtw8852c_bb_sethw,
+ .set_channel = rtw8852c_set_channel,
+ .set_channel_help = rtw8852c_set_channel_help,
.read_efuse = rtw8852c_read_efuse,
.read_phycap = rtw8852c_read_phycap,
+ .rfk_init = rtw8852c_rfk_init,
+ .rfk_channel = rtw8852c_rfk_channel,
+ .rfk_band_changed = rtw8852c_rfk_band_changed,
+ .rfk_scan = rtw8852c_rfk_scan,
+ .rfk_track = rtw8852c_rfk_track,
.power_trim = rtw8852c_power_trim,
+ .set_txpwr = rtw8852c_set_txpwr,
+ .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852c_init_txpwr_unit,
+ .get_thermal = rtw8852c_get_thermal,
+ .query_ppdu = rtw8852c_query_ppdu,
.read_rf = rtw89_phy_read_rf_v1,
.write_rf = rtw89_phy_write_rf_v1,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.pwr_on_func = rtw8852c_pwr_on_func,
.pwr_off_func = rtw8852c_pwr_off_func,
+ .fill_txdesc = rtw89_core_fill_txdesc_v1,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1,
.mac_cfg_gnt = rtw89_mac_cfg_gnt_v1,
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
.resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
+ .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1,
+
+ .btc_set_rfe = rtw8852c_btc_set_rfe,
+ .btc_init_cfg = rtw8852c_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852c_btc_set_wl_pri,
+ .btc_set_wl_txpwr_ctrl = rtw8852c_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8852c_btc_get_bt_rssi,
+ .btc_bt_aci_imp = rtw8852c_btc_bt_aci_imp,
+ .btc_update_bt_cnt = rtw8852c_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852c_btc_wl_s1_standby,
};
const struct rtw89_chip_info rtw8852c_chip_info = {
.chip_id = RTL8852C,
.ops = &rtw8852c_chip_ops,
.fw_name = "rtw89/rtw8852c_fw.bin",
+ .hfc_param_ini = rtw8852c_hfc_param_ini_pcie,
.dle_mem = rtw8852c_dle_mem_pcie,
.rf_base_addr = {0xe000, 0xf000},
.pwr_on_seq = NULL,
.pwr_off_seq = NULL,
+ .bb_table = &rtw89_8852c_phy_bb_table,
+ .bb_gain_table = &rtw89_8852c_phy_bb_gain_table,
+ .rf_table = {&rtw89_8852c_phy_radiob_table,
+ &rtw89_8852c_phy_radioa_table,},
+ .nctl_table = &rtw89_8852c_phy_nctl_table,
+ .byr_table = &rtw89_8852c_byr_table,
+ .txpwr_lmt_2g = &rtw89_8852c_txpwr_lmt_2g,
+ .txpwr_lmt_5g = &rtw89_8852c_txpwr_lmt_5g,
+ .txpwr_lmt_6g = &rtw89_8852c_txpwr_lmt_6g,
+ .txpwr_lmt_ru_2g = &rtw89_8852c_txpwr_lmt_ru_2g,
+ .txpwr_lmt_ru_5g = &rtw89_8852c_txpwr_lmt_ru_5g,
+ .txpwr_lmt_ru_6g = &rtw89_8852c_txpwr_lmt_ru_6g,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = NULL,
+ .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
+ .hw_sec_hdr = true,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
@@ -512,7 +2804,12 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dav_log_efuse_size = 16,
.phycap_addr = 0x590,
.phycap_size = 0x60,
+ .low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1,
.hci_func_en_addr = R_AX_HCI_FUNC_EN_V1,
+ .h2c_desc_size = sizeof(struct rtw89_rxdesc_short),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body_v1),
.h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1,
.h2c_regs = rtw8852c_h2c_regs,
.c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1,
@@ -520,6 +2817,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.page_regs = &rtw8852c_page_regs,
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 5,
+ .imr_info = &rtw8852c_imr_info,
};
EXPORT_SYMBOL(rtw8852c_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
index d0594716040b..558dd0f048f2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
@@ -8,6 +8,8 @@
#include "core.h"
#define RF_PATH_NUM_8852C 2
+#define BB_PATH_NUM_8852C 2
+#define NTX_NUM_8852C 2
struct rtw8852c_u_efuse {
u8 rsvd[0x38];
@@ -58,13 +60,23 @@ struct rtw8852c_efuse {
u8 rsvd7[3];
u8 path_a_therm;
u8 path_b_therm;
- u8 rsvd8[46];
+ u8 rsvd8[2];
+ u8 rx_gain_2g_ofdm;
+ u8 rsvd9;
+ u8 rx_gain_2g_cck;
+ u8 rsvd10;
+ u8 rx_gain_5g_low;
+ u8 rsvd11;
+ u8 rx_gain_5g_mid;
+ u8 rsvd12;
+ u8 rx_gain_5g_high;
+ u8 rsvd13[35];
u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM];
- u8 rsvd9[10];
+ u8 rsvd14[10];
u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM];
- u8 rsvd10[110];
+ u8 rsvd15[110];
u8 channel_plan_6g;
- u8 rsvd11[71];
+ u8 rsvd16[71];
union {
struct rtw8852c_u_efuse u;
struct rtw8852c_e_efuse e;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
new file mode 100644
index 000000000000..ffc71ad24927
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -0,0 +1,4023 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c.h"
+#include "rtw8852c_rfk.h"
+#include "rtw8852c_rfk_table.h"
+#include "rtw8852c_table.h"
+
+#define _TSSI_DE_MASK GENMASK(21, 12)
+static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
+static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
+static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838};
+static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840};
+static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848};
+static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850};
+static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
+static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};
+
+static const u32 rtw8852c_backup_bb_regs[] = {
+ 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x823c, 0x8224, 0x8220,
+ 0xc1d4, 0xc1d8, 0xc1e8
+};
+
+static const u32 rtw8852c_backup_rf_regs[] = {
+ 0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005
+};
+
+#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
+#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs)
+
+#define RXK_GROUP_NR 4
+static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316};
+static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00};
+static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318};
+static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
+static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360};
+static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3};
+
+#define TXK_GROUP_NR 3
+static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
+static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7};
+static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
+static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
+static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
+static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7};
+static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
+static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
+static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
+static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6};
+static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e};
+static const u32 _txk_g_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
+
+static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
+ {0x8190, 0x8194, 0x8198, 0x81a4},
+ {0x81a8, 0x81c4, 0x81c8, 0x81e8},
+};
+
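+/* Return the RF path mask to calibrate for a PHY: both paths unless DBCC is
+ * enabled, in which case PHY0 maps to path A and PHY1 to path B.
+ */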
+static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
+ rtwdev->dbcc_en, phy_idx);
+
+ if (!rtwdev->dbcc_en)
+ return RF_AB;
+
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_B;
+}
+
+static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ backup_bb_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]backup bb reg : %x, value =%x\n",
+ rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
+ u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ backup_rf_reg_val[i] =
+ rtw89_read_rf(rtwdev, rf_path,
+ rtw8852c_backup_rf_regs[i], RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
+ rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
+ MASKDWORD, backup_bb_reg_val[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]restore bb reg : %x, value =%x\n",
+ rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
+ u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i],
+ RFREG_MASK, backup_rf_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
+ rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u8 path;
+ u32 rf_mode;
+ int ret;
+
+ for (path = 0; path < RF_PATH_MAX; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
+ 2, 5000, false, rtwdev, path, 0x00,
+ RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void _dack_dump(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+ u8 t;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[0][0], dack->addck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[1][0], dack->addck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[1][0], dack->dadck_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[0][0], dack->biask_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[1][0], dack->biask_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+}
+
+static void _addck_backup(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
+ dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
+ B_ADDCKR0_A0);
+ dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
+ B_ADDCKR0_A1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
+ dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
+ B_ADDCKR1_A0);
+ dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
+ B_ADDCKR1_A1);
+}
+
+static void _addck_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1,
+ dack->addck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0,
+ dack->addck_d[0][1]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1,
+ dack->addck_d[1][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0,
+ dack->addck_d[1][1]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
+}
+
+static void _dack_backup_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
+ dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK_S0P2,
+ B_DACK_S0M0);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
+ dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK_S0P3,
+ B_DACK_S0M1);
+ }
+ dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00,
+ B_DACK_BIAS00);
+ dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01,
+ B_DACK_BIAS01);
+ dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00,
+ B_DACK_DADCK00);
+ dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01,
+ B_DACK_DADCK01);
+}
+
+static void _dack_backup_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
+ dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK10S,
+ B_DACK10S);
+ rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
+ dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK11S,
+ B_DACK11S);
+ }
+ dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10,
+ B_DACK_BIAS10);
+ dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11,
+ B_DACK_BIAS11);
+ dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10,
+ B_DACK_DADCK10);
+ dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11,
+ B_DACK_DADCK11);
+}
+
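+/* Reload the backed-up MSBK/bias/DADCK words for one path and index. The
+ * destination registers start at 0xc200, offset by 0x14 per index and 0x28
+ * per path.
+ */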
+static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 index)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 idx_offset, path_offset;
+ u32 val32, offset, addr;
+ u8 i;
+
+ idx_offset = (index == 0 ? 0 : 0x14);
+ path_offset = (path == RF_PATH_A ? 0 : 0x28);
+ offset = idx_offset + path_offset;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl);
+
+ /* msbk_d: 15/14/13/12 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i + 12] << (i * 8);
+ addr = 0xc200 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* msbk_d: 11/10/9/8 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i + 8] << (i * 8);
+ addr = 0xc204 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* msbk_d: 7/6/5/4 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i + 4] << (i * 8);
+ addr = 0xc208 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* msbk_d: 3/2/1/0 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i] << (i * 8);
+ addr = 0xc20c + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* dadak_d/biask_d */
+ val32 = (dack->biask_d[path][index] << 22) |
+ (dack->dadck_d[path][index] << 14);
+ addr = 0xc210 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_phy_write32_set(rtwdev, addr, BIT(1));
+}
+
+static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u8 i;
+
+ for (i = 0; i < 2; i++)
+ _dack_reload_by_path(rtwdev, path, i);
+}
+
+static void _addck(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ /* S0 */
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
+ fsleep(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false, rtwdev, 0xc0fc, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
+
+ /* S1 */
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false, rtwdev, 0xc1fc, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
+ dack->addck_timeout[1] = true;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
+}
+
+static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_dack_reset_defs_a_tbl,
+ &rtw8852c_dack_reset_defs_b_tbl);
+}
+
+enum adc_ck {
+ ADC_NA = 0,
+ ADC_480M = 1,
+ ADC_960M = 2,
+ ADC_1920M = 3,
+};
+
+enum dac_ck {
+ DAC_40M = 0,
+ DAC_80M = 1,
+ DAC_120M = 2,
+ DAC_160M = 3,
+ DAC_240M = 4,
+ DAC_320M = 5,
+ DAC_480M = 6,
+ DAC_960M = 7,
+};
+
+enum rf_mode {
+ RF_SHUT_DOWN = 0x0,
+ RF_STANDBY = 0x1,
+ RF_TX = 0x2,
+ RF_RX = 0x3,
+ RF_TXIQK = 0x4,
+ RF_DPK = 0x5,
+ RF_RXK1 = 0x6,
+ RF_RXK2 = 0x7,
+};
+
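+/* Force the per-path TX DAC clock: the override is always dropped first and,
+ * when force is set, re-asserted with the requested dac_ck rate.
+ * rtw8852c_rxck_force() below does the same for the RX ADC clock.
+ */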
+static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
+ enum dac_ck ck)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
+}
+
+static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
+ enum adc_ck ck)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+}
+
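+/* DACK for a path is done only when all four per-stage OK bits (P0..P3) of
+ * that path are set.
+ */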
+static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
+{
+ if (s0) {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
+ return false;
+ } else {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
+ rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);
+
+ _dack_reset(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
+ ret = read_poll_timeout_atomic(_check_dack_done, done, done,
+ 1, 10000, false, rtwdev, true);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
+ dack->msbk_timeout[0] = true;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
+ rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
+
+ _dack_backup_s0(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_A);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+}
+
+static void _dack_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
+ rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);
+
+ _dack_reset(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
+ ret = read_poll_timeout_atomic(_check_dack_done, done, done,
+ 1, 10000, false, rtwdev, false);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
+ dack->msbk_timeout[1] = true;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
+ rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
+
+ _dack_backup_s1(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_B);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+ _dack_s0(rtwdev);
+ _dack_s1(rtwdev);
+}
+
+static void _drck(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+ int ret;
+
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false, rtwdev, 0xc0c8, BIT(3));
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);
+
+ val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
+}
+
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 rf0_0, rf1_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+
+ dack->dack_done = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+ rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
+ rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
+ _drck(rtwdev);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+ _addck(rtwdev);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+
+ _addck_backup(rtwdev);
+ _addck_reload(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+ _dack(rtwdev);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+
+ _dack_dump(rtwdev);
+ dack->dack_done = true;
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
+ dack->dack_cnt++;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
+}
+
+#define RTW8852C_NCTL_VER 0xd
+#define RTW8852C_IQK_VER 0x2a
+#define RTW8852C_IQK_SS 2
+#define RTW8852C_IQK_THR_REK 8
+#define RTW8852C_IQK_CFIR_GROUP_NR 4
+
+enum rtw8852c_iqk_type {
+ ID_TXAGC,
+ ID_G_FLOK_COARSE,
+ ID_A_FLOK_COARSE,
+ ID_G_FLOK_FINE,
+ ID_A_FLOK_FINE,
+ ID_FLOK_VBUFFER,
+ ID_TXK,
+ ID_RXAGC,
+ ID_RXK,
+ ID_NBTXK,
+ ID_NBRXK,
+};
+
+static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxagc)
+{
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxagc);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxagc);
+}
+
+static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202);
+
+ switch (iqk_info->iqk_bw[path]) {
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl);
+
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202);
+}
+
+static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
+{
+ u32 tmp;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
+
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);
+
+ return false;
+}
+
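+/* Issue a single NCTL calibration command of the given ktype and report the
+ * result of _iqk_check_cal(); bit 1 of the per-path R_UPD_CLK control is
+ * cleared again on exit.
+ */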
+static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 addr_rfc_ctl = R_UPD_CLK + (path << 13);
+ u32 iqk_cmd;
+ bool fail;
+
+ switch (ktype) {
+ case ID_TXAGC:
+ iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_A_FLOK_COARSE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x008 | (1 << (4 + path));
+ break;
+ case ID_G_FLOK_COARSE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_A_FLOK_FINE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x508 | (1 << (4 + path));
+ break;
+ case ID_G_FLOK_FINE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x208 | (1 << (4 + path));
+ break;
+ case ID_FLOK_VBUFFER:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_TXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
+ iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8);
+ break;
+ case ID_RXAGC:
+ iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_RXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8);
+ break;
+ case ID_NBTXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
+ iqk_cmd = 0x408 | (1 << (4 + path));
+ break;
+ case ID_NBRXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x608 | (1 << (4 + path));
+ break;
+ default:
+ return false;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
+ fsleep(15);
+ fail = _iqk_check_cal(rtwdev, path, ktype);
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
+
+ return fail;
+}
+
+static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u32 tmp;
+ u32 bkrf0;
+ u8 gp;
+
+ bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
+ if (path == RF_PATH_B) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
+ }
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
+ break;
+ }
+
+ fsleep(10);
+
+ for (gp = 0; gp < RXK_GROUP_NR; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
+ _rxk_g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF,
+ _rxk_g_idxattc2[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
+ _rxk_a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
+ _rxk_a_idxattc2[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
+ _rxk_a6_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
+ _rxk_a6_idxattc2[gp]);
+ break;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SET, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP_V1, gp);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ }
+
+ if (path == RF_PATH_B)
+ rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
+
+ if (fail) {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ iqk_info->is_wb_rxiqk[path] = false;
+ } else {
+ iqk_info->nb_rxcfir[path] = 0x40000000;
+ iqk_info->is_wb_rxiqk[path] = true;
+ }
+
+ return false;
+}
+
+static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u32 tmp;
+ u32 bkrf0;
+ u8 gp = 0x2;
+
+ bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
+ if (path == RF_PATH_B) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
+ }
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
+ break;
+ }
+
+ fsleep(10);
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]);
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+
+ if (path == RF_PATH_B)
+ rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
+
+ if (fail)
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ else
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+
+ iqk_info->is_wb_rxiqk[path] = false;
+ return fail;
+}
+
+static bool _txk_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u8 gp;
+
+ for (gp = 0; gp < TXK_GROUP_NR; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _txk_g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _txk_g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _txk_g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_g_itqt[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _txk_a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _txk_a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _txk_a_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a_itqt[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _txk_a6_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _txk_a6_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _txk_a6_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a6_itqt[gp]);
+ break;
+ default:
+ break;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SET, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP, gp + 1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ }
+
+ if (fail) {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ iqk_info->is_wb_txiqk[path] = false;
+ } else {
+ iqk_info->nb_txcfir[path] = 0x40000000;
+ iqk_info->is_wb_txiqk[path] = true;
+ }
+
+ return fail;
+}
+
+static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u8 gp = 0x2;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_g_itqt[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a_itqt[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a6_itqt[gp]);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+
+ if (!fail)
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ else
+ iqk_info->nb_txcfir[path] = 0x40000002;
+
+ iqk_info->is_wb_txiqk[path] = false;
+
+ return fail;
+}
+
+static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx = mcc_info->table_idx;
+ bool is_fail1, is_fail2;
+ u32 val;
+ u32 core_i;
+ u32 core_q;
+ u32 vbuff_i;
+ u32 vbuff_q;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
+ core_i = FIELD_GET(RR_TXMO_COI, val);
+ core_q = FIELD_GET(RR_TXMO_COQ, val);
+
+ if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
+ is_fail1 = true;
+ else
+ is_fail1 = false;
+
+ iqk_info->lok_idac[idx][path] = val;
+
+ val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
+ vbuff_i = FIELD_GET(RR_LOKVB_COI, val);
+ vbuff_q = FIELD_GET(RR_LOKVB_COQ, val);
+
+ if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
+ is_fail2 = true;
+ else
+ is_fail2 = false;
+
+ iqk_info->lok_vbuf[idx][path] = val;
+
+ return is_fail1 || is_fail2;
+}
+
+static bool _iqk_lok(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 tmp_id = 0x0;
+ bool fail = false;
+ bool tmp = false;
+
+ /* Step 0: Init RF gain & tone idx = 8.25 MHz */
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ);
+
+ /* Step 1 START: _lok_coarse_fine_wi_swap */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_G_FLOK_COARSE;
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_COARSE;
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_COARSE;
+ break;
+ default:
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
+ iqk_info->lok_cor_fail[0][path] = tmp;
+
+ /* Step 2 */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ default:
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
+
+ /* Step 3 */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_G_FLOK_FINE;
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_FINE;
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_FINE;
+ break;
+ default:
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
+ iqk_info->lok_fin_fail[0][path] = tmp;
+
+ /* Step 4 large rf gain */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
+ fail = _lok_finetune_check(rtwdev, path);
+
+ return fail;
+}
+
+static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ fsleep(10);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ fsleep(10);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ fsleep(10);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
+ break;
+ }
+}
+
+static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 tmp;
+ bool flag;
+
+ iqk_info->thermal[path] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ iqk_info->thermal_rek_en = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
+ iqk_info->thermal[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
+ iqk_info->lok_cor_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
+ iqk_info->lok_fin_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
+ iqk_info->iqk_tx_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
+ iqk_info->iqk_rx_fail[0][path]);
+
+ flag = iqk_info->lok_cor_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
+ flag = iqk_info->lok_fin_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
+ flag = iqk_info->iqk_tx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
+ flag = iqk_info->iqk_rx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
+ iqk_info->bp_iqkenable[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_txkresult[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_rxkresult[path] = tmp;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
+ iqk_info->iqk_times);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
+ if (tmp != 0x0)
+ iqk_info->iqk_fail_cnt++;
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
+ iqk_info->iqk_fail_cnt);
+}
+
+static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ _iqk_txk_setting(rtwdev, path);
+ iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path);
+
+ if (iqk_info->is_nbiqk)
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+
+ _iqk_rxk_setting(rtwdev, path);
+ if (iqk_info->is_nbiqk)
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+
+ _iqk_info_iqk(rtwdev, phy_idx, path);
+}
+
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ iqk_info->iqk_band[path] = hal->current_band_type;
+ iqk_info->iqk_bw[path] = hal->current_band_width;
+ iqk_info->iqk_ch[path] = hal->current_channel;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
+ iqk_info->iqk_band[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
+ path, iqk_info->iqk_bw[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
+ path, iqk_info->iqk_ch[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
+ rtwdev->dbcc_en ? "on" : "off",
+ iqk_info->iqk_band[path] == 0 ? "2G" :
+ iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
+ iqk_info->iqk_ch[path],
+ iqk_info->iqk_bw[path] == 0 ? "20M" :
+ iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
+ if (!rtwdev->dbcc_en)
+ iqk_info->syn1to2 = 0x1;
+ else
+ iqk_info->syn1to2 = 0x3;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
+ iqk_info->iqk_band[path]);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
+ iqk_info->iqk_bw[path]);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
+ iqk_info->iqk_ch[path]);
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER);
+}
+
+static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ _iqk_by_path(rtwdev, phy_idx, path);
+}
+
+static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_txcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_rxcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00001219 + (path << 4));
+ fsleep(200);
+ fail = _iqk_check_cal(rtwdev, path, 0x12);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+}
+
+static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_iqk_afebb_restore_defs_a_tbl,
+ &rtw8852c_iqk_afebb_restore_defs_b_tbl);
+
+ rtw8852c_disable_rxagc(rtwdev, path, 0x1);
+}
+
+static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ u8 idx = mcc_info->table_idx;
+

+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
+}
+
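+/* Prepare MAC/BB/AFE for IQK: hold the ADC FIFOs in reset, disable RX AGC,
+ * force the DAC/ADC clocks to 960M/1920M, then release the FIFOs again.
+ */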
+static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
+
+ /* 01_BB_AFE_for DPK_S0_20210820 */
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
+
+ /* disable rxagc */
+ rtw8852c_disable_rxagc(rtwdev, path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1);
+
+ rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);
+
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
+}
+
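+/* RC calibration: trigger RCK via RR_RCKC, poll the done bit, latch the
+ * calibrated code back into RR_RCKC, then restore the saved RF register.
+ */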
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u32 rf_reg5, rck_val = 0;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ /* RCK trigger */
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
+ false, rtwdev, path, 0x1c, BIT(3));
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
+
+ rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
+}
+
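+/* One-time initialization of the IQK bookkeeping: clear the report register
+ * and reset all per-channel/per-path failure flags and table indexes.
+ */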
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 ch, path;
+
+ rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
+ if (iqk_info->is_iqk_init)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ iqk_info->is_iqk_init = true;
+ iqk_info->is_nbiqk = false;
+ iqk_info->iqk_fft_en = false;
+ iqk_info->iqk_sram_en = false;
+ iqk_info->iqk_cfir_en = false;
+ iqk_info->iqk_xym_en = false;
+ iqk_info->thermal_rek_en = false;
+ iqk_info->iqk_times = 0x0;
+
+ for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
+ iqk_info->iqk_channel[ch] = 0x0;
+ for (path = 0; path < RTW8852C_IQK_SS; path++) {
+ iqk_info->lok_cor_fail[ch][path] = false;
+ iqk_info->lok_fin_fail[ch][path] = false;
+ iqk_info->iqk_tx_fail[ch][path] = false;
+ iqk_info->iqk_rx_fail[ch][path] = false;
+ iqk_info->iqk_mcc_ch[ch][path] = 0x0;
+ iqk_info->iqk_table_idx[path] = 0x0;
+ }
+ }
+}
+
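+/* Full IQK sequence for one path: back up BB/RF registers, set up MAC/BB/AFE,
+ * run the calibration, then restore everything, with BTC notified around it.
+ */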
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]==========IQK strat!!!!!==========\n");
+ iqk_info->iqk_times++;
+ iqk_info->kcount = 0;
+ iqk_info->version = RTW8852C_IQK_VER;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _rfk_backup_bb_reg(rtwdev, backup_bb_val);
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+ _rfk_restore_bb_reg(rtwdev, backup_bb_val);
+ _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+{
+ switch (_kpath(rtwdev, phy_idx)) {
+ case RF_A:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ break;
+ case RF_B:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ case RF_AB:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ default:
+ break;
+ }
+}
+
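+/* Pulse the RX DCK trigger and wait for the RF to report completion. */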
+static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
+{
+ int ret;
+ u32 val;
+
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
+ 2, 1000, false, rtwdev, path, 0x93, BIT(5));
+ if (ret)
+ rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path);
+
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+}
+
+static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ bool is_afe)
+{
+ u8 res;
+
+ rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
+
+ _rx_dck_toggle(rtwdev, path);
+ if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0)
+ return;
+ res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE);
+ if (res > 1) {
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res);
+ _rx_dck_toggle(rtwdev, path);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1);
+ }
+}
+
+#define RTW8852C_RF_REL_VERSION 34
+#define RTW8852C_DPK_VER 0x10
+#define RTW8852C_DPK_TH_AVG_NUM 4
+#define RTW8852C_DPK_RF_PATH 2
+#define RTW8852C_DPK_KIP_REG_NUM 5
+#define RTW8852C_DPK_RXSRAM_DBG 0
+
+enum rtw8852c_dpk_id {
+ LBK_RXIQK = 0x06,
+ SYNC = 0x10,
+ MDPK_IDL = 0x11,
+ MDPK_MPA = 0x12,
+ GAIN_LOSS = 0x13,
+ GAIN_CAL = 0x14,
+ DPK_RXAGC = 0x15,
+ KIP_PRESET = 0x16,
+ KIP_RESTORE = 0x17,
+ DPK_TXAGC = 0x19,
+ D_KIP_PRESET = 0x28,
+ D_TXAGC = 0x29,
+ D_RXAGC = 0x2a,
+ D_SYNC = 0x2b,
+ D_GAIN_LOSS = 0x2c,
+ D_MDPK_IDL = 0x2d,
+ D_GAIN_NORM = 0x2f,
+ D_KIP_THERMAL = 0x30,
+ D_KIP_RESTORE = 0x31
+};
+
+#define DPK_TXAGC_LOWER 0x2e
+#define DPK_TXAGC_UPPER 0x3f
+#define DPK_TXAGC_INVAL 0xff
+
+enum dpk_agc_step {
+ DPK_AGC_STEP_SYNC_DGAIN,
+ DPK_AGC_STEP_GAIN_LOSS_IDX,
+ DPK_AGC_STEP_GL_GT_CRITERION,
+ DPK_AGC_STEP_GL_LT_CRITERION,
+ DPK_AGC_STEP_SET_TX_GAIN,
+};
+
+static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool off);
+
+static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+ u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
+ reg_bkup[path][i] =
+ rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
+static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+ u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
+ rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
+ MASKDWORD, reg_bkup[path][i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
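+/* Issue a one-shot DPK command to the NCTL and poll the done flag (0x55).
+ * Returns 0 on completion, 1 if the command does not finish within 20ms.
+ */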
+static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, enum rtw8852c_dpk_id id)
+{
+ u16 dpk_cmd;
+ u32 val;
+ int ret;
+
+ dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12));
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
+ mdelay(10);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
+ id == 0x06 ? "LBK_RXIQK" :
+ id == 0x10 ? "SYNC" :
+ id == 0x11 ? "MDPK_IDL" :
+ id == 0x12 ? "MDPK_MPA" :
+ id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
+ dpk_cmd, ret);
+
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot over 20ms!!!!\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void _dpk_information(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].band = hal->current_band_type;
+ dpk->bp[path][kidx].ch = hal->current_channel;
+ dpk->bp[path][kidx].bw = hal->current_band_width;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
+ path, dpk->cur_idx[path], phy,
+ rtwdev->is_tssi_mode[path] ? "on" : "off",
+ rtwdev->dbcc_en ? "on" : "off",
+ dpk->bp[path][kidx].band == 0 ? "2G" :
+ dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
+ dpk->bp[path][kidx].ch,
+ dpk->bp[path][kidx].bw == 0 ? "20M" :
+ dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
+}
+
+static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ /*1. Keep ADC_fifo reset*/
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
+
+ /*2. BB for IQK DBG mode*/
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);
+
+ /*3.Set DAC clk*/
+ rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
+
+ /*4. Set ADC clk*/
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
+ B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
+
+ /*5. ADDA fifo rst*/
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path);
+}
+
+static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
+ B_P0_NRBW_DBG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path);
+}
+
+static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_pause)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, is_pause);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
+ is_pause ? "pause" : "resume");
+}
+
+static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip)
+{
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
+ ctrl_by_kip ? "KIP" : "BB");
+}
+
+static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force)
+{
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n",
+ path, force ? "on" : "off");
+}
+
+static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE);
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ _dpk_txpwr_bb_force(rtwdev, path, false);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
+}
+
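+/* Loopback RX IQK used by DPK: save and override the TX injection gain and
+ * RX BB gain, run the LBK_RXIQK one-shot, then restore the original RF/KIP
+ * settings.
+ */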
+static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+#define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */
+ u8 cur_rxbb;
+ u32 rf_11, reg_81cc;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
+
+ _dpk_kip_control_rfc(rtwdev, path, false);
+
+ cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
+ rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK);
+ reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_SW);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3);
+
+ _dpk_kip_control_rfc(rtwdev, path, true);
+
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX);
+
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));
+
+ _dpk_kip_control_rfc(rtwdev, path, false);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc);
+
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
+
+ _dpk_kip_control_rfc(rtwdev, path, true);
+}
+
+static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x50121 | BIT(rtwdev->dbcc_en));
+ rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK));
+ } else {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x50101 | BIT(rtwdev->dbcc_en));
+ rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_6G &&
+     dpk->bp[path][kidx].ch >= 161)
+ rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
+ rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160)
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0);
+ }
+}
+
+static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30);
+ } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00);
+ } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
+}
+
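+/* Read back the SYNC results (correlation and DC offset) and judge whether
+ * they exceed the allowed thresholds; returns true on failure.
+ */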
+static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define DPK_SYNC_TH_DC_I 200
+#define DPK_SYNC_TH_DC_Q 200
+#define DPK_SYNC_TH_CORR 170
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u16 dc_i, dc_q;
+ u8 corr_val, corr_idx, rxbb;
+ u8 rxbb_ov;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+ corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
+ corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
+
+ dpk->corr_idx[path][kidx] = corr_idx;
+ dpk->corr_val[path][kidx] = corr_val;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
+
+ dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+ dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
+
+ dc_i = abs(sign_extend32(dc_i, 11));
+ dc_q = abs(sign_extend32(dc_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n",
+ path, corr_idx, corr_val, dc_i, dc_q);
+
+ dpk->dc_i[path][kidx] = dc_i;
+ dpk->dc_q[path][kidx] = dc_q;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8);
+ rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31);
+ rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n",
+ path, rxbb,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE),
+ rxbb_ov);
+
+ return dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
+        corr_val < DPK_SYNC_TH_CORR;
+}
+
+static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
+{
+ u16 dgain = 0x0;
+
+ rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
+
+ dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain);
+
+ return dgain;
+}
+
+static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
+{
+ u8 result;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+
+ result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result);
+
+ return result;
+}
+
+static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10);
+ dpk->cur_k_set =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1;
+}
+
+static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 dbm, bool set_from_bb)
+{
+ if (set_from_bb) {
+ dbm = clamp_t(u8, dbm, 7, 24);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2);
+ }
+ _dpk_one_shot(rtwdev, phy, path, D_TXAGC);
+ _dpk_kset_query(rtwdev, path);
+}
+
+static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS);
+ _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0);
+
+ return _dpk_gainloss_read(rtwdev);
+}
+
+static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
+{
+ u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
+
+ if (is_check) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
+ val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val1_i = abs(sign_extend32(val1_i, 11));
+ val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val1_q = abs(sign_extend32(val1_q, 11));
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
+ val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val2_i = abs(sign_extend32(val2_i, 11));
+ val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val2_q = abs(sign_extend32(val2_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
+ phy_div(val1_i * val1_i + val1_q * val1_q,
+ val2_i * val2_i + val2_q * val2_q));
+ } else {
+ for (i = 0; i < 32; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+ }
+
+ return val1_i * val1_i + val1_q * val1_q >=
+        (val2_i * val2_i + val2_q * val2_q) * 8 / 5;
+}
+
+static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, D_RXAGC);
+
+ return _dpk_sync_check(rtwdev, path, kidx);
+}
+
+static void _dpk_read_rxsram(struct rtw89_dev *rtwdev)
+{
+ u32 addr;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl);
+
+ for (addr = 0; addr < 0x200; addr++) {
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl);
+}
+
+static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n");
+}
+
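+/* AGC loop for DPK: iterate through sync/digital-gain, gain-loss and TX/RX
+ * gain adjustment until the gain loss lands inside the criterion or the
+ * retry limit is hit.
+ */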
+static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 step = DPK_AGC_STEP_SYNC_DGAIN;
+ u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
+ u8 tmp_rxbb;
+ u8 goout = 0, agc_cnt = 0;
+ u16 dgain = 0;
+ bool is_fail = false;
+ int limit = 200;
+
+ do {
+ switch (step) {
+ case DPK_AGC_STEP_SYNC_DGAIN:
+ is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);
+
+ if (RTW8852C_DPK_RXSRAM_DBG)
+ _dpk_read_rxsram(rtwdev);
+
+ if (is_fail) {
+ goout = 1;
+ break;
+ }
+
+ dgain = _dpk_dgain_read(rtwdev);
+
+ if (dgain > 0x5fc || dgain < 0x556) {
+ _dpk_one_shot(rtwdev, phy, path, D_SYNC);
+ dgain = _dpk_dgain_read(rtwdev);
+ }
+
+ if (agc_cnt == 0) {
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
+ _dpk_bypass_rxiqc(rtwdev, path);
+ else
+ _dpk_lbk_rxiqk(rtwdev, phy, path);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ break;
+
+ case DPK_AGC_STEP_GAIN_LOSS_IDX:
+ tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
+
+ if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
+ tmp_gl_idx >= 7)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else
+ step = DPK_AGC_STEP_SET_TX_GAIN;
+ break;
+
+ case DPK_AGC_STEP_GL_GT_CRITERION:
+ if (tmp_dbm <= 7) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n");
+ } else {
+ tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
+ _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
+ }
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GL_LT_CRITERION:
+ if (tmp_dbm >= 24) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n");
+ } else {
+ tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
+ _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
+ }
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_SET_TX_GAIN:
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
+ if (tmp_rxbb + tmp_gl_idx > 0x1f)
+ tmp_rxbb = 0x1f;
+ else
+ tmp_rxbb = tmp_rxbb + tmp_gl_idx;
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n",
+ tmp_gl_idx, tmp_rxbb);
+ _dpk_kip_control_rfc(rtwdev, path, true);
+ goout = 1;
+ break;
+ default:
+ goout = 1;
+ break;
+ }
+ } while (!goout && agc_cnt < 6 && --limit > 0);
+
+ if (limit <= 0)
+ rtw89_warn(rtwdev, "[DPK] exceed loop limit\n");
+
+ return is_fail;
+}
+
+static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
+{
+ static const struct rtw89_rfk_tbl *order_tbls[] = {
+ &rtw8852c_dpk_mdpd_order0_defs_tbl,
+ &rtw8852c_dpk_mdpd_order1_defs_tbl,
+ &rtw8852c_dpk_mdpd_order2_defs_tbl,
+ &rtw8852c_dpk_mdpd_order3_defs_tbl,
+ };
+
+ if (order >= ARRAY_SIZE(order_tbls)) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order);
+ return;
+ }
+
+ rtw89_rfk_parser(rtwdev, order_tbls[order]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
+ order == 0x0 ? "(5,3,1)" :
+ order == 0x1 ? "(5,3,0)" :
+ order == 0x2 ? "(5,0,0)" : "(7,3,1)");
+}
+
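+/* Select the MDPD order from the MPA report (or from the bandwidth as a
+ * fallback), then run the MDPK_IDL one-shot, retrying on sync-error overflow.
+ */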
+static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 cnt;
+ u8 ov_flag;
+ u32 dpk_sync;
+
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);
+
+ if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1)
+ _dpk_set_mdpd_para(rtwdev, 0x2);
+ else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1)
+ _dpk_set_mdpd_para(rtwdev, 0x1);
+ else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1)
+ _dpk_set_mdpd_para(rtwdev, 0x0);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 ||
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 ||
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20)
+ _dpk_set_mdpd_para(rtwdev, 0x2);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ||
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
+ _dpk_set_mdpd_para(rtwdev, 0x1);
+ else
+ _dpk_set_mdpd_para(rtwdev, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0);
+ fsleep(1000);
+
+ _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+ dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
+ ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
+ for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n");
+ _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
+ ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
+ }
+
+ if (ov_flag) {
+ _dpk_set_mdpd_para(rtwdev, 0x2);
+ _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
+ }
+}
+
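+/* Check whether a stored DPK result matches the current band/channel; if so,
+ * point the coefficient selector at it and mark the path as reloaded.
+ */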
+static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ bool is_reload = false;
+ u8 idx, cur_band, cur_ch;
+
+ cur_band = rtwdev->hal.current_band_type;
+ cur_ch = rtwdev->hal.current_channel;
+
+ for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
+ if (cur_band != dpk->bp[path][idx].band ||
+ cur_ch != dpk->bp[path][idx].ch)
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_MDPD, idx);
+ dpk->cur_idx[path] = idx;
+ is_reload = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] reload S%d[%d] success\n", path, idx);
+ }
+
+ return is_reload;
+}
+
+static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on)
+{
+ rtw89_rfk_parser(rtwdev, turn_on ? &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl :
+ &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
+}
+
+static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ if (rtwdev->hal.cv == CHIP_CAV)
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_SEL, 0x01);
+ else
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_SEL, 0x0c);
+
+ _dpk_kip_control_rfc(rtwdev, path, true);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
+
+ _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
+}
+
+static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define _DPK_PARA_TXAGC GENMASK(15, 10)
+#define _DPK_PARA_THER GENMASK(31, 26)
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u32 para;
+
+ para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+ MASKDWORD);
+
+ dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
+ dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
+ dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
+}
+
+static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, bool is_execute)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (is_execute) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);
+
+ _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+ 0x0000007F, 0x5b);
+ }
+ dpk->bp[path][kidx].gs =
+ rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+ 0x0000007F);
+}
+
+static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
+{
+ u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
+ u8 val;
+
+ switch (val32) {
+ case 0:
+ val = 0x6;
+ break;
+ case 1:
+ val = 0x2;
+ break;
+ case 2:
+ val = 0x0;
+ break;
+ case 3:
+ val = 0x7;
+ break;
+ default:
+ val = 0xff;
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
+
+ return val;
+}
+
+static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_ORDER, _dpk_order_convert(rtwdev));
+
+ dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
+ dpk->bp[path][kidx].path_ok = true;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
+ path, kidx, dpk->bp[path][kidx].mdpd_en);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);
+
+ _dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
+}
+
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 kidx = dpk->cur_idx[path];
+ u8 init_xdbm = 15;
+ bool is_fail;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ _rf_direct_cntrl(rtwdev, path, false);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
+ _dpk_rf_setting(rtwdev, gain, path, kidx);
+ _set_rx_dck(rtwdev, phy, path, false);
+ _dpk_kip_pwr_clk_onoff(rtwdev, true);
+ _dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
+ _dpk_txpwr_bb_force(rtwdev, path, true);
+ _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
+ _dpk_tpg_sel(rtwdev, path, kidx);
+
+ is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
+ if (is_fail)
+ goto _error;
+
+ _dpk_idl_mpa(rtwdev, phy, path, kidx);
+ _dpk_para_query(rtwdev, path, kidx);
+ _dpk_on(rtwdev, phy, path, kidx);
+
+_error:
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
+ dpk->cur_k_set, is_fail ? "need Check" : "is Success");
+
+ return is_fail;
+}
+
+static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].path_ok = false;
+}
+
+static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+}
+
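+/* Top-level DPK flow: optionally reload stored results, otherwise back up
+ * KIP/RF state, run DPK on each requested path, then restore KIP/RF/BB/AFE
+ * and resume TSSI tracking.
+ */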
+static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy, u8 kpath)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8};
+ u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
+ u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
+ u8 path;
+ bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
+
+ if (dpk->is_dpk_reload_en) {
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ if (!reloaded[path] && dpk->bp[path][0].ch != 0)
+ dpk->cur_idx[path] = !dpk->cur_idx[path];
+ else
+ _dpk_onoff(rtwdev, path, false);
+ }
+ } else {
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
+ dpk->cur_idx[path] = 0;
+ }
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Init =========\n",
+ path, dpk->cur_idx[path]);
+ _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _dpk_information(rtwdev, phy, path);
+ _dpk_init(rtwdev, path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, true);
+ }
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n",
+ path, dpk->cur_idx[path]);
+ rtw8852c_disable_rxagc(rtwdev, path, 0x0);
+ _dpk_drf_direct_cntrl(rtwdev, path, false);
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+ is_fail = _dpk_main(rtwdev, phy, path, 1);
+ _dpk_onoff(rtwdev, path, is_fail);
+ }
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Restore =========\n",
+ path, dpk->cur_idx[path]);
+ _dpk_kip_restore(rtwdev, phy, path);
+ _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
+ _dpk_bb_afe_restore(rtwdev, path);
+ rtw8852c_disable_rxagc(rtwdev, path, 0x1);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, false);
+ }
+
+ _dpk_kip_pwr_clk_onoff(rtwdev, false);
+}
+
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_fem_info *fem = &rtwdev->fem;
+
+ if (rtwdev->hal.cv == CHIP_CAV && rtwdev->hal.current_band_type != RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
+ return true;
+ } else if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_6g && rtwdev->hal.current_band_type == RTW89_BAND_6G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ if (kpath & BIT(path))
+ _dpk_onoff(rtwdev, path, true);
+ }
+}
+
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
+ RTW8852C_DPK_VER, rtwdev->hal.cv,
+ RTW8852C_RF_REL_VERSION);
+
+ if (_dpk_bypass_check(rtwdev, phy))
+ _dpk_force_bypass(rtwdev, phy);
+ else
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
+ rtw8852c_rx_dck(rtwdev, phy, false);
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool off)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 val, kidx = dpk->cur_idx[path];
+
+ val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ?
+ dpk->bp[path][kidx].mdpd_en : 0;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_MEN, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
+ kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
+}
+
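+/* Thermal tracking for DPK: compare the current thermal reading with the
+ * value captured at calibration time and, on CAV chips, adjust the DPD power
+ * scaling factor accordingly.
+ */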
+static void _dpk_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 path, kidx;
+ u8 txagc_rf = 0;
+ s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0;
+ u8 cur_ther;
+ s8 delta_ther = 0;
+ s16 pwsf_tssi_ofst;
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ kidx = dpk->cur_idx[path];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
+ path, kidx, dpk->bp[path][kidx].ch);
+
+ txagc_rf =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f);
+ txagc_bb =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2);
+ txagc_bb_tp =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP);
+
+ /* report from KIP */
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf);
+ cur_ther =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH);
+ txagc_ofst =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF);
+ pwsf_tssi_ofst =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI);
+ pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12);
+
+ cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] thermal now = %d\n", cur_ther);
+
+ if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
+ delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther;
+
+ delta_ther /= 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n",
+ delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n",
+ txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf,
+ dpk->bp[path][kidx].txagc_dpk);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n",
+ txagc_ofst, pwsf_tssi_ofst);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
+ txagc_bb_tp, txagc_bb);
+
+ if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 &&
+ txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x07FC0000, 0x78 - delta_ther);
+ }
+ }
+}
+
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_sys_defs_2g_a_tbl,
+ &rtw8852c_tssi_sys_defs_5g_a_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_sys_defs_2g_b_tbl,
+ &rtw8852c_tssi_sys_defs_5g_b_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl,
+ &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
+ &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
+}
+
+static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+
+ if (path == RF_PATH_A) {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_dck_defs_2g_a_tbl,
+ &rtw8852c_tssi_dck_defs_5g_a_tbl);
+ } else {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl);
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_dck_defs_2g_b_tbl,
+ &rtw8852c_tssi_dck_defs_5g_b_tbl);
+ }
+}
+
+static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_set_bbgain_split_a_tbl,
+ &rtw8852c_tssi_set_bbgain_split_b_tbl);
+}
+
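+/* Program the TSSI thermal compensation table: pick the delta swing tables
+ * for the current subband, build the 64-entry offset table around the stored
+ * thermal value and write it out for this path.
+ */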
+static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+#define RTW8852C_TSSI_GET_VAL(ptr, idx) \
+({ \
+ s8 *__ptr = (ptr); \
+ u8 __idx = (idx), __i, __v; \
+ u32 __val = 0; \
+ for (__i = 0; __i < 4; __i++) { \
+ __v = (__ptr[__idx + __i]); \
+ __val |= (__v << (8 * __i)); \
+ } \
+ __val; \
+})
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = rtwdev->hal.current_channel;
+ u8 subband = rtwdev->hal.current_subband;
+ const s8 *thm_up_a = NULL;
+ const s8 *thm_down_a = NULL;
+ const s8 *thm_up_b = NULL;
+ const s8 *thm_down_b = NULL;
+ u8 thermal = 0xff;
+ s8 thm_ofst[64] = {0};
+ u32 tmp = 0;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
+ break;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
+ break;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
+ break;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
+ break;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
+ break;
+ }
+
+ if (path == RF_PATH_A) {
+ thermal = tssi_info->thermal[RF_PATH_A];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_a[i++] :
+ -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_a[i++] :
+ thm_up_a[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
+
+ } else {
+ thermal = tssi_info->thermal[RF_PATH_B];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_b[i++] :
+ -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_b[i++] :
+ thm_up_b[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
+ }
+#undef RTW8852C_TSSI_GET_VAL
+}
+
+static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+
+ if (path == RF_PATH_A) {
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl,
+ &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl);
+ } else {
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl,
+ &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl);
+ }
+}
+
+static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_rfk_tbl *tbl;
+
+ if (path == RF_PATH_A) {
+ if (band == RTW89_BAND_2G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
+ else if (band == RTW89_BAND_6G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
+ else
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
+ } else {
+ if (band == RTW89_BAND_2G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
+ else if (band == RTW89_BAND_6G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
+ else
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
+ }
+
+ rtw89_rfk_parser(rtwdev, tbl);
+}
+
+static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_slope_defs_a_tbl,
+ &rtw8852c_tssi_slope_defs_b_tbl);
+}
+
+static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_run_slope_defs_a_tbl,
+ &rtw8852c_tssi_run_slope_defs_b_tbl);
+}
+
+static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_track_defs_a_tbl,
+ &rtw8852c_tssi_track_defs_b_tbl);
+}
+
+static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl,
+ &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl);
+}
+
+static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ for (i = path; i < path_max; i++) {
+ _tssi_set_track(rtwdev, phy, i);
+ _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
+
+ rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
+ &rtw8852c_tssi_enable_defs_a_tbl,
+ &rtw8852c_tssi_enable_defs_b_tbl);
+
+ tssi_info->base_thermal[i] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
+ rtwdev->is_tssi_mode[i] = true;
+ }
+}
+
+static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ for (i = path; i < path_max; i++) {
+ if (i == RF_PATH_A) {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl);
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ } else if (i == RF_PATH_B) {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl);
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ }
+ }
+}
+
+static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
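+/* Channels that sit between two calibration groups are tagged with the
+ * extra-group bit so their DE value can be derived from the two neighbouring
+ * groups.
+ */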
+#define TSSI_EXTRA_GROUP_BIT (BIT(31))
+#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
+#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
+static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 5:
+ return 0;
+ case 6 ... 8:
+ return TSSI_EXTRA_GROUP(0);
+ case 9 ... 13:
+ return 1;
+ case 14 ... 16:
+ return TSSI_EXTRA_GROUP(1);
+ case 17 ... 21:
+ return 2;
+ case 22 ... 24:
+ return TSSI_EXTRA_GROUP(2);
+ case 25 ... 29:
+ return 3;
+ case 33 ... 37:
+ return 4;
+ case 38 ... 40:
+ return TSSI_EXTRA_GROUP(4);
+ case 41 ... 45:
+ return 5;
+ case 46 ... 48:
+ return TSSI_EXTRA_GROUP(5);
+ case 49 ... 53:
+ return 6;
+ case 54 ... 56:
+ return TSSI_EXTRA_GROUP(6);
+ case 57 ... 61:
+ return 7;
+ case 65 ... 69:
+ return 8;
+ case 70 ... 72:
+ return TSSI_EXTRA_GROUP(8);
+ case 73 ... 77:
+ return 9;
+ case 78 ... 80:
+ return TSSI_EXTRA_GROUP(9);
+ case 81 ... 85:
+ return 10;
+ case 86 ... 88:
+ return TSSI_EXTRA_GROUP(10);
+ case 89 ... 93:
+ return 11;
+ case 97 ... 101:
+ return 12;
+ case 102 ... 104:
+ return TSSI_EXTRA_GROUP(12);
+ case 105 ... 109:
+ return 13;
+ case 110 ... 112:
+ return TSSI_EXTRA_GROUP(13);
+ case 113 ... 117:
+ return 14;
+ case 118 ... 120:
+ return TSSI_EXTRA_GROUP(14);
+ case 121 ... 125:
+ return 15;
+ case 129 ... 133:
+ return 16;
+ case 134 ... 136:
+ return TSSI_EXTRA_GROUP(16);
+ case 137 ... 141:
+ return 17;
+ case 142 ... 144:
+ return TSSI_EXTRA_GROUP(17);
+ case 145 ... 149:
+ return 18;
+ case 150 ... 152:
+ return TSSI_EXTRA_GROUP(18);
+ case 153 ... 157:
+ return 19;
+ case 161 ... 165:
+ return 20;
+ case 166 ... 168:
+ return TSSI_EXTRA_GROUP(20);
+ case 169 ... 173:
+ return 21;
+ case 174 ... 176:
+ return TSSI_EXTRA_GROUP(21);
+ case 177 ... 181:
+ return 22;
+ case 182 ... 184:
+ return TSSI_EXTRA_GROUP(22);
+ case 185 ... 189:
+ return 23;
+ case 193 ... 197:
+ return 24;
+ case 198 ... 200:
+ return TSSI_EXTRA_GROUP(24);
+ case 201 ... 205:
+ return 25;
+ case 206 ... 208:
+ return TSSI_EXTRA_GROUP(25);
+ case 209 ... 213:
+ return 26;
+ case 214 ... 216:
+ return TSSI_EXTRA_GROUP(26);
+ case 217 ... 221:
+ return 27;
+ case 225 ... 229:
+ return 28;
+ case 230 ... 232:
+ return TSSI_EXTRA_GROUP(28);
+ case 233 ... 237:
+ return 29;
+ case 238 ... 240:
+ return TSSI_EXTRA_GROUP(29);
+ case 241 ... 245:
+ return 30;
+ case 246 ... 248:
+ return TSSI_EXTRA_GROUP(30);
+ case 249 ... 253:
+ return 31;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(2);
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(4);
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 13:
+ return 0;
+ case 14 ... 16:
+ return TSSI_EXTRA_GROUP(0);
+ case 17 ... 29:
+ return 1;
+ case 33 ... 45:
+ return 2;
+ case 46 ... 48:
+ return TSSI_EXTRA_GROUP(2);
+ case 49 ... 61:
+ return 3;
+ case 65 ... 77:
+ return 4;
+ case 78 ... 80:
+ return TSSI_EXTRA_GROUP(4);
+ case 81 ... 93:
+ return 5;
+ case 97 ... 109:
+ return 6;
+ case 110 ... 112:
+ return TSSI_EXTRA_GROUP(6);
+ case 113 ... 125:
+ return 7;
+ case 129 ... 141:
+ return 8;
+ case 142 ... 144:
+ return TSSI_EXTRA_GROUP(8);
+ case 145 ... 157:
+ return 9;
+ case 161 ... 173:
+ return 10;
+ case 174 ... 176:
+ return TSSI_EXTRA_GROUP(10);
+ case 177 ... 189:
+ return 11;
+ case 193 ... 205:
+ return 12;
+ case 206 ... 208:
+ return TSSI_EXTRA_GROUP(12);
+ case 209 ... 221:
+ return 13;
+ case 225 ... 237:
+ return 14;
+ case 238 ... 240:
+ return TSSI_EXTRA_GROUP(14);
+ case 241 ... 253:
+ return 15;
+ }
+
+ return 0;
+}
+
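+/* Return the OFDM/MCS TSSI DE for the current channel, using the 2G/5G or
+ * the 6G group table and averaging the two adjacent groups for channels
+ * flagged as TSSI_EXTRA_GROUP.
+ */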
+static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+ u8 ch = rtwdev->hal.current_channel;
+ u32 gidx, gidx_1st, gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ s8 val;
+
+ if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
+ gidx = _tssi_get_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+ } else {
+ gidx = _tssi_get_6g_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_6g_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+ }
+
+ return val;
+}
+
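+/* Same lookup as above, but for the trim tables (tssi_trim / tssi_trim_6g). */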
+static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+ u8 ch = rtwdev->hal.current_channel;
+ u32 tgidx, tgidx_1st, tgidx_2nd;
+ s8 tde_1st = 0;
+ s8 tde_2nd = 0;
+ s8 val;
+
+ if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
+ tgidx = _tssi_get_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+ } else {
+ tgidx = _tssi_get_6g_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim_6g[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+ }
+
+ return val;
+}
+
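+/* Program the per-path CCK and MCS TSSI DE registers with the calibration
+ * values plus the per-channel trim offset.
+ */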
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = rtwdev->hal.current_channel;
+ u8 gidx;
+ s8 ofdm_de;
+ s8 trim_de;
+ s32 val;
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ for (i = path; i < path_max; i++) {
+ gidx = _tssi_get_cck_group(rtwdev, ch);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = tssi_info->tssi_cck[i][gidx] + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
+ i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_cck_long[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
+ _TSSI_DE_MASK));
+
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = ofdm_de + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
+ i, ofdm_de, trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_mcs_20m[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
+ _TSSI_DE_MASK));
+ }
+}
+
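+/* Switch continuous TSSI tracking for one path: clearing BIT(30) of
+ * 0x5818/0x7818 and BIT(31) of 0x5820/0x7820 turns tracking on and reloads
+ * the DE values derived from efuse; setting those bits freezes tracking.
+ */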
+static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk[2] = {0x5818, 0x7818};
+ static const u32 tssi_en[2] = {0x5820, 0x7820};
+
+ if (en) {
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
+ rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
+ if (rtwdev->dbcc_en && path == RF_PATH_B)
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
+ else
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
+ rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
+ }
+}
+
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (!rtwdev->dbcc_en) {
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
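+/* Program the bandwidth field of RF reg 0x18 (RR_CFGCH when @is_dav, else
+ * the RR_CFGCH_V1 copy) together with the matching R_P0_CFCH_BW0/BW1
+ * baseband settings.
+ */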
+static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw89_bandwidth bw, bool is_dav)
+{
+ u32 rf_reg18;
+ u32 reg_reg18_addr;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ if (is_dav)
+ reg_reg18_addr = RR_CFGCH;
+ else
+ reg_reg18_addr = RR_CFGCH_V1;
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~RR_CFGCH_BW;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
+}
+
+static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ bool is_dav;
+ u8 kpath, path;
+ u32 tmp = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < 2; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ is_dav = true;
+ _bw_setting(rtwdev, path, bw, is_dav);
+ is_dav = false;
+ _bw_setting(rtwdev, path, bw, is_dav);
+ if (rtwdev->dbcc_en)
+ continue;
+
+ if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
+ fsleep(100);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
+ }
+ }
+}
+
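+/* Write the channel number and band select fields of RF reg 0x18 (address
+ * 0x18 or 0x10018, selected by @is_dav).
+ */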
+static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 central_ch, enum rtw89_band band, bool is_dav)
+{
+ u32 rf_reg18;
+ u32 reg_reg18_addr;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ if (is_dav)
+ reg_reg18_addr = 0x18;
+ else
+ reg_reg18_addr = 0x10018;
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
+ break;
+ case RTW89_BAND_5G:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
+ break;
+ case RTW89_BAND_6G:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
+ break;
+ default:
+ break;
+ }
+ rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
+ fsleep(100);
+}
+
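+/* Validate the requested channel against the 2G/5G/6G channel plans, then
+ * apply it to both copies of reg 0x18 on every path under calibration.
+ */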
+static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band)
+{
+ u8 kpath, path;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ if (band != RTW89_BAND_6G) {
+ if ((central_ch > 14 && central_ch < 36) ||
+ (central_ch > 64 && central_ch < 100) ||
+ (central_ch > 144 && central_ch < 149) || central_ch > 177)
+ return;
+ } else {
+ if (central_ch > 253 || central_ch == 2)
+ return;
+ }
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < 2; path++) {
+ if (kpath & BIT(path)) {
+ _ch_setting(rtwdev, path, central_ch, band, true);
+ _ch_setting(rtwdev, path, central_ch, band, false);
+ }
+ }
+}
+
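+/* Select the RX baseband filter bandwidth through an RF LUT write
+ * (RR_LUTWE2/RR_LUTWA/RR_LUTWD0); wider channels use smaller LUT codes.
+ */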
+static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ u8 kpath;
+ u8 path;
+ u32 val;
+
+ kpath = _kpath(rtwdev, phy);
+ for (path = 0; path < 2; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ val = 0x1b;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ val = 0x13;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ val = 0xb;
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ default:
+ val = 0x3;
+ break;
+ }
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
+ }
+}
+
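+/* Record the current averaged thermal value of each path as the LCK
+ * baseline.
+ */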
+static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_lck_info *lck = &rtwdev->lck;
+ int path;
+
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ lck->thermal[path] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]);
+ }
+}
+
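+/* Run LO calibration: toggle RR_LCK_TRG while re-writing RR_CFGCH on each
+ * path, then refresh the thermal baseline.
+ */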
+static void _lck(struct rtw89_dev *rtwdev)
+{
+ u32 tmp18[2];
+ int path = rtwdev->dbcc_en ? 2 : 1;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");
+
+ tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+
+ for (i = 0; i < path; i++) {
+ rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
+ rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ }
+
+ _lck_keep_thermal(rtwdev);
+}
+
+#define RTW8852C_LCK_TH 8
+
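+/* Re-run LCK once the thermal reading of any path drifts by at least
+ * RTW8852C_LCK_TH from the last calibration.
+ */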
+void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_lck_info *lck = &rtwdev->lck;
+ u8 cur_thermal;
+ int delta;
+ int path;
+
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ cur_thermal =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ delta = abs((int)cur_thermal - lck->thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
+ path, cur_thermal, delta);
+
+ if (delta >= RTW8852C_LCK_TH) {
+ _lck(rtwdev);
+ return;
+ }
+ }
+}
+
+void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
+{
+ _lck_keep_thermal(rtwdev);
+}
+
+static
+void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band,
+ enum rtw89_bandwidth bw)
+{
+ _ctrl_ch(rtwdev, phy, central_ch, band);
+ _ctrl_bw(rtwdev, phy, bw);
+ _rxbb_bw(rtwdev, phy, bw);
+}
+
+void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, param->center_chan, param->band_type,
+ param->bandwidth);
+}
+
+void rtw8852c_rck(struct rtw89_dev *rtwdev)
+{
+ u8 path;
+
+ for (path = 0; path < 2; path++)
+ _rck(rtwdev, path);
+}
+
+void rtw8852c_dack(struct rtw89_dev *rtwdev)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
+ _dac_cal(rtwdev, false);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
+}
+
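+/* IQ calibration, run with scheduled TX stopped and BT coexistence
+ * notified around the calibration.
+ */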
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _iqk_init(rtwdev);
+ _iqk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+#define RXDCK_VER_8852C 0xe
+
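+/* Per-path RX DC offset calibration; continuous TSSI tracking is paused
+ * around the calibration when TSSI mode is active on the path.
+ */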
+void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+{
+ u8 path, kpath;
+ u32 rf_reg5;
+
+ kpath = _kpath(rtwdev, phy);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
+ RXDCK_VER_8852C, rtwdev->hal.cv);
+
+ for (path = 0; path < 2; path++) {
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+ if (!(kpath & BIT(path)))
+ continue;
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ _set_rx_dck(rtwdev, phy, path, is_afe);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x0);
+ }
+}
+
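+/* Digital pre-distortion calibration, with the same TX-stop and BT-coex
+ * wrapping as IQK.
+ */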
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ rtwdev->dpk.is_dpk_enable = true;
+ rtwdev->dpk.is_dpk_reload_en = false;
+ _dpk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
+}
+
+void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
+{
+ _dpk_track(rtwdev);
+}
+
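+/* Full TSSI bring-up: disable tracking, run the per-path setup sequence,
+ * then re-enable tracking and load the DE values derived from efuse.
+ */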
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = path; i < path_max; i++) {
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_bbgain_split(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_slope(rtwdev, phy, i);
+ _tssi_run_slope(rtwdev, phy, i);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
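+/* Reduced TSSI re-init (a subset of rtw8852c_tssi()); bails out unless
+ * TSSI mode is already active on both paths.
+ */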
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
+ __func__, phy);
+
+ if (!rtwdev->is_tssi_mode[RF_PATH_A])
+ return;
+ if (!rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = path; i < path_max; i++) {
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_set_aligk_default(rtwdev, phy, i);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
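+/* SCAN_START: latch the current TX AGC offsets per path.
+ * SCAN_END: write them back and pulse the TSSI offset-enable bits to apply
+ * them.
+ */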
+static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, bool enable)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 i;
+
+ if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ if (enable) {
+ /* SCAN_START */
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_A] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
+ B_TXAGC_BB);
+ if (tssi_info->default_txagc_offset[RF_PATH_A])
+ break;
+ }
+ }
+
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_B] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
+ B_TXAGC_BB_S1);
+ if (tssi_info->default_txagc_offset[RF_PATH_B])
+ break;
+ }
+ }
+ } else {
+ /* SCAN_END */
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
+ tssi_info->default_txagc_offset[RF_PATH_A]);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
+ tssi_info->default_txagc_offset[RF_PATH_B]);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
+ }
+}
+
+void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev,
+ bool scan_start, enum rtw89_phy_idx phy_idx)
+{
+ if (scan_start)
+ rtw8852c_tssi_default_txagc(rtwdev, phy_idx, true);
+ else
+ rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
new file mode 100644
index 000000000000..e42fb1a4965e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_RFK_H__
+#define __RTW89_8852C_RFK_H__
+
+#include "core.h"
+
+void rtw8852c_rck(struct rtw89_dev *rtwdev);
+void rtw8852c_dack(struct rtw89_dev *rtwdev);
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe);
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_dpk_track(struct rtw89_dev *rtwdev);
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx);
+void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx);
+void rtw8852c_lck_init(struct rtw89_dev *rtwdev);
+void rtw8852c_lck_track(struct rtw89_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c
new file mode 100644
index 000000000000..d727d528b365
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "rtw8852c_rfk_table.h"
+
+static const struct rtw89_reg5_def rtw8852c_dack_reload_defs[] = {
+ RTW89_DECL_RFK_WM(0xc004, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc024, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc104, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc124, BIT(17), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reload_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_a[] = {
+ RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x0),
+ RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_b[] = {
+ RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x0),
+ RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_dack_defs_s0[] = {
+ RTW89_DECL_RFK_WM(0x12b8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1),
+ RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0xc004, 0xfff00000, 0x30),
+ RTW89_DECL_RFK_WM(0xc024, 0xfff00000, 0x30),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s0);
+
+static const struct rtw89_reg5_def rtw8852c_dack_defs_s1[] = {
+ RTW89_DECL_RFK_WM(0x32b8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1),
+ RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0xc104, 0xfff00000, 0x30),
+ RTW89_DECL_RFK_WM(0xc124, 0xfff00000, 0x30),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s1);
+
+static const struct rtw89_reg5_def rtw8852c_drck_defs[] = {
+ RTW89_DECL_RFK_WM(0xc0c4, BIT(6), 0x0),
+ RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x1),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_drck_defs);
+
+static const struct rtw89_reg5_def rtw8852c_iqk_rxk_cfg_defs[] = {
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0f),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x03),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0001),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0041),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_rxk_cfg_defs);
+
+static const struct rtw89_reg5_def rtw8852c_iqk_afebb_restore_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00100000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5670, MASKDWORD, 0x00000000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x0),
+ RTW89_DECL_RFK_WRF(RF_PATH_A, 0x10005, 0x00001, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_iqk_afebb_restore_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00200000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x20000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7670, MASKDWORD, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x0),
+ RTW89_DECL_RFK_WRF(RF_PATH_B, 0x10005, 0x00001, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_read_rxsram_pre_defs[] = {
+ RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x1),
+ RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x1),
+ RTW89_DECL_RFK_WM(0x80d4, MASKDWORD, 0x00020000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_pre_defs);
+
+static const struct rtw89_reg5_def rtw8852c_read_rxsram_post_defs[] = {
+ RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x0),
+ RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_post_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order0_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x2),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order0_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order1_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x1),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x1),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order1_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order2_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x2),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x0),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order2_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order3_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x3),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x3),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order3_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_on_defs[] = {
+ RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000080),
+ RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x807f030a),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_on_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_off_defs[] = {
+ RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000000),
+ RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x80000000),
+ RTW89_DECL_RFK_WM(0x80f4, BIT(18), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_off_defs);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs[] = {
+ RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0xb5b5),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0xb5b5),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1f19),
+ RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x1c),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041),
+ RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x2001),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x026d000),
+ RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x3dc80280),
+ RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0x800),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x026d000),
+ RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x3dc80280),
+ RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0x800),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x0),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00000fff, 0x0),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00fff000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x0),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00000fff, 0x0),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00fff000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x1af),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x1af),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x1),
+ RTW89_DECL_RFK_WM(0x5814, 0x0003c000, 0xb),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x1),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x6),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x1),
+ RTW89_DECL_RFK_WM(0x7814, 0x0003c000, 0xb),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x1),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x6),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_a[] = {
+ RTW89_DECL_RFK_WM(0x5818, 0x08000000, 0x1),
+ RTW89_DECL_RFK_WM(0x58d4, 0xf0000000, 0x7),
+ RTW89_DECL_RFK_WM(0x58f0, 0x000c0000, 0x1),
+ RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x400),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_b[] = {
+ RTW89_DECL_RFK_WM(0x7818, 0x08000000, 0x1),
+ RTW89_DECL_RFK_WM(0x78d4, 0xf0000000, 0x7),
+ RTW89_DECL_RFK_WM(0x78f0, 0x000c0000, 0x1),
+ RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x400),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x0808081e),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x081d),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0204020),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x020),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08081e21),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x1d23),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x2d2721),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x3b8),
+ RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3d2),
+ RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x042),
+ RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x06b),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x3bc),
+ RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3d6),
+ RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x03e),
+ RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x06b),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x2d2721),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x3c0),
+ RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3da),
+ RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x002),
+ RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x071),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x3c8),
+ RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e2),
+ RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x00c),
+ RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x071),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x07d),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x07d),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_a[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x080),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_b[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x080),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x0f),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x28),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x76),
+ RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x0f),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x28),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x76),
+ RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x0),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x1ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x080),
+ RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x0),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x1ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x080),
+ RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58e4, 0x00003800, 0x1),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x1),
+ RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x0),
+ RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78e4, 0x00003800, 0x1),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x1),
+ RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x0),
+ RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WRF(RF_PATH_A, 0x10055, 0x00080, 0x1),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WRF(RF_PATH_B, 0x10055, 0x00080, 0x1),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_b);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h
new file mode 100644
index 000000000000..953a960ef1e8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_RFK_TABLE_H__
+#define __RTW89_8852C_RFK_TABLE_H__
+
+#include "phy.h"
+
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reload_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s0_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s1_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_drck_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_rxk_cfg_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_pre_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_post_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order0_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order1_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order2_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order3_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_on_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_off_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_bbgain_split_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_bbgain_split_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_b_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
new file mode 100644
index 000000000000..477c46041c94
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -0,0 +1,19470 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c_table.h"
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
+ {0xF0FF0000, 0x00000000},
+ {0xF03300FF, 0x00000001},
+ {0xF03400FF, 0x00000002},
+ {0x70C, 0x00000020},
+ {0x704, 0x601E0100},
+ {0x4000, 0x00000000},
+ {0x4004, 0xCA014000},
+ {0x4008, 0xC751D4F0},
+ {0x400C, 0x44511475},
+ {0x4010, 0x00000000},
+ {0x4014, 0x00000000},
+ {0x44AC, 0x01F60380},
+ {0x4018, 0x4F4C4B4B},
+ {0x401C, 0x494A4E52},
+ {0x4020, 0x4D504E4B},
+ {0x4024, 0x4F4C4949},
+ {0x4028, 0x49484C50},
+ {0x402C, 0x4C50504C},
+ {0x4030, 0x54544D4A},
+ {0x4034, 0x504B5654},
+ {0x4038, 0x6A6C605A},
+ {0x403C, 0x48484848},
+ {0x4040, 0x48483D47},
+ {0x4044, 0x3D474848},
+ {0x4048, 0x51484848},
+ {0x404C, 0x4A4A404F},
+ {0x4050, 0x514F4C4A},
+ {0x4054, 0x524E4A4A},
+ {0x4058, 0x4A4A5154},
+ {0x405C, 0x53555554},
+ {0x4060, 0x45454545},
+ {0x4064, 0x45454144},
+ {0x4068, 0x40434445},
+ {0x406C, 0x44454545},
+ {0x4070, 0x44444043},
+ {0x4074, 0x42434444},
+ {0x4078, 0x46454444},
+ {0x407C, 0x44444843},
+ {0x4080, 0x4B4E4A47},
+ {0x4084, 0x514D4A49},
+ {0x4088, 0x4A495454},
+ {0x408C, 0x5454514D},
+ {0x4090, 0x524E4B4A},
+ {0x4094, 0x4C4B5455},
+ {0x4098, 0x55565550},
+ {0x409C, 0x5959504D},
+ {0x40A0, 0x544E5D5A},
+ {0x40A4, 0x7975665F},
+ {0x40A8, 0x48484848},
+ {0x40AC, 0x48483D47},
+ {0x40B0, 0x3D474848},
+ {0x40B4, 0x48484848},
+ {0x40B8, 0x48483E48},
+ {0x40BC, 0x3E4A4A49},
+ {0x40C0, 0x514E4948},
+ {0x40C4, 0x4A49404F},
+ {0x40C8, 0x42525555},
+ {0x40CC, 0x47474747},
+ {0x40D0, 0x47474747},
+ {0x40D4, 0x47474747},
+ {0x40D8, 0x48484848},
+ {0x40DC, 0x48474848},
+ {0x40E0, 0x4A484848},
+ {0x40E4, 0x49484847},
+ {0x40E8, 0x4847524D},
+ {0x40EC, 0x55544F4B},
+ {0x40F0, 0x00000000},
+ {0x4604, 0x4C4C4D4E},
+ {0x4608, 0x3D3D6A56},
+ {0x460C, 0x53515140},
+ {0x4610, 0x42404041},
+ {0x4614, 0x54544B48},
+ {0x4618, 0x795D5554},
+ {0x461C, 0x3E3E3D3D},
+ {0x4620, 0x47474240},
+ {0x4624, 0x55524A48},
+ {0x4ED4, 0x00000000},
+ {0x40F4, 0x00000006},
+ {0x4628, 0x00000000},
+ {0x4E9C, 0x26663333},
+ {0x4EA0, 0x6EDA4148},
+ {0x4EA4, 0x599A0000},
+ {0x4EA8, 0x40000000},
+ {0x4ED0, 0x00000001},
+ {0x40F8, 0x00000000},
+ {0x40FC, 0x8C30C30C},
+ {0x4100, 0x4C30C30C},
+ {0x4104, 0x0C30C30C},
+ {0x4108, 0x0C30C30C},
+ {0x410C, 0x0C30C30C},
+ {0x4110, 0x0C30C30C},
+ {0x4114, 0x28A28A28},
+ {0x4118, 0x28A28A28},
+ {0x411C, 0x28A28A28},
+ {0x4120, 0x28A28A28},
+ {0x4124, 0x28A28A28},
+ {0x4128, 0x28A28A28},
+ {0x412C, 0x06666666},
+ {0x4130, 0x33333333},
+ {0x4134, 0x33333333},
+ {0x4138, 0x33333333},
+ {0x413C, 0x00000031},
+ {0x462C, 0x0C30C30C},
+ {0x4630, 0x0C30C30C},
+ {0x4634, 0x28A28A28},
+ {0x4638, 0x28A28A28},
+ {0x463C, 0x33333333},
+ {0x4640, 0x00000033},
+ {0x4140, 0x5100600A},
+ {0x4144, 0x18363113},
+ {0x4148, 0x1D976DDC},
+ {0x414C, 0x1C072DD7},
+ {0x4150, 0x1127CDF4},
+ {0x4154, 0x1E37BDF1},
+ {0x4158, 0x1FB7F1D6},
+ {0x415C, 0x1EA7DDF9},
+ {0x4160, 0x1FE445DD},
+ {0x4164, 0x1F97F1FE},
+ {0x4168, 0x1FF781ED},
+ {0x416C, 0x1FA7F5FE},
+ {0x4170, 0x1E07B913},
+ {0x4174, 0x1FD7FDFF},
+ {0x4178, 0x1E17B9FA},
+ {0x417C, 0x19A66914},
+ {0x4180, 0x10F65598},
+ {0x4184, 0x14A5A111},
+ {0x4188, 0x1D3765DB},
+ {0x418C, 0x17C685CA},
+ {0x4190, 0x1107C5F3},
+ {0x4194, 0x1B5785EB},
+ {0x4198, 0x1F97ED8F},
+ {0x419C, 0x1BC7A5F3},
+ {0x41A0, 0x1FE43595},
+ {0x41A4, 0x1EB7D9FC},
+ {0x41A8, 0x1FE65DBE},
+ {0x41AC, 0x1EC7D9FC},
+ {0x41B0, 0x1976FCFF},
+ {0x41B4, 0x1F77F5FF},
+ {0x41B8, 0x1976FDEC},
+ {0x41BC, 0x198664EF},
+ {0x41C0, 0x11062D93},
+ {0x41C4, 0x10C4E910},
+ {0x41C8, 0x1CA759DB},
+ {0x41CC, 0x1335A9B5},
+ {0x41D0, 0x1097B9F3},
+ {0x41D4, 0x17B72DE1},
+ {0x41D8, 0x1F67ED42},
+ {0x41DC, 0x18074DE9},
+ {0x41E0, 0x1FD40547},
+ {0x41E4, 0x1D57ADF9},
+ {0x41E8, 0x1FE52182},
+ {0x41EC, 0x1D67B1F9},
+ {0x41F0, 0x14860CE1},
+ {0x41F4, 0x1EC7E9FE},
+ {0x41F8, 0x14860DD6},
+ {0x41FC, 0x195664C7},
+ {0x4200, 0x0005E58A},
+ {0x4204, 0x00000000},
+ {0x4208, 0x00000000},
+ {0x420C, 0x7A000000},
+ {0x4210, 0x0F9F3D7A},
+ {0x4214, 0x0040817C},
+ {0x4218, 0x00E10204},
+ {0x421C, 0x257D94CD},
+ {0x4220, 0x0802DB6D},
+ {0x4224, 0x00000200},
+ {0x4228, 0x04688000},
+ {0x4644, 0x00000000},
+ {0x4648, 0x00000000},
+ {0x464C, 0x00000000},
+ {0x4650, 0x00000020},
+ {0x4ECC, 0x00000001},
+ {0x422C, 0x0060B002},
+ {0x4230, 0x9A8249A8},
+ {0x4234, 0x26A1469E},
+ {0x4238, 0x2099A824},
+ {0x423C, 0x2359461C},
+ {0x4240, 0x1631A675},
+ {0x4244, 0x2C6B1D63},
+ {0x4248, 0x0000000E},
+ {0x424C, 0x00000001},
+ {0x4250, 0x00000001},
+ {0x4254, 0x00000000},
+ {0x4258, 0x00000000},
+ {0x425C, 0x00000000},
+ {0x4260, 0x01E0000C},
+ {0x4654, 0x00000000},
+ {0x4658, 0x00000000},
+ {0x465C, 0x0000001E},
+ {0x4E74, 0x00000000},
+ {0x4264, 0x00000000},
+ {0x4268, 0x00000000},
+ {0x426C, 0x0418317C},
+ {0x46C0, 0x00000001},
+ {0x4270, 0x00D6135C},
+ {0x46C4, 0x00000033},
+ {0x4274, 0x00000000},
+ {0x4278, 0x00000000},
+ {0x427C, 0x00000000},
+ {0x4280, 0x00000000},
+ {0x4284, 0x00000000},
+ {0x4288, 0x00000000},
+ {0x46D8, 0x00000000},
+ {0x46DC, 0x00000000},
+ {0x46E0, 0x00000000},
+ {0x46E4, 0x00000000},
+ {0x46E8, 0x00000000},
+ {0x428C, 0x00000000},
+ {0x4290, 0x00000000},
+ {0x4294, 0x00000000},
+ {0x4298, 0x84026000},
+ {0x429C, 0x0051AC20},
+ {0x46EC, 0x1020C040},
+ {0x46F0, 0xB8BEBEB8},
+ {0x46F4, 0x021102BE},
+ {0x46F8, 0x14221142},
+ {0x46FC, 0x18C4098C},
+ {0x4700, 0x00021084},
+ {0x42A0, 0x02024008},
+ {0x42A4, 0x00000000},
+ {0x42A8, 0x00000000},
+ {0x42AC, 0x22CE803C},
+ {0x42B0, 0x32000000},
+ {0x42B4, 0x996FD67D},
+ {0x42B8, 0xBD67D67D},
+ {0x42BC, 0x7D67D65B},
+ {0x42C0, 0x28029F59},
+ {0x42C4, 0x00280280},
+ {0x4704, 0x00000000},
+ {0x42C8, 0x00000000},
+ {0x42CC, 0x00000000},
+ {0x42D0, 0x00000003},
+ {0x4708, 0x00280000},
+ {0x42D4, 0x00000001},
+ {0x42D8, 0x61861800},
+ {0x42DC, 0x830C30C3},
+ {0x42E0, 0xC30C30C3},
+ {0x42E4, 0x830C30C3},
+ {0x42E8, 0x451450C3},
+ {0x42EC, 0x05145145},
+ {0x42F0, 0x05145145},
+ {0x42F4, 0x05145145},
+ {0x42F8, 0x03207145},
+ {0x42FC, 0x041C32C6},
+ {0x4300, 0x031C5247},
+ {0x4304, 0x030C5143},
+ {0x4308, 0x030C30C3},
+ {0x430C, 0x0F3CF3C3},
+ {0x4310, 0x0F3CF3CF},
+ {0x4314, 0x0F3CF3CF},
+ {0x4318, 0x0F3CF3CF},
+ {0x431C, 0x0F3CF3CF},
+ {0x4320, 0x030C10C3},
+ {0x4324, 0x051430C3},
+ {0x4328, 0x051490CB},
+ {0x432C, 0x030C70D1},
+ {0x4330, 0x050C50C7},
+ {0x4334, 0x051492CB},
+ {0x4338, 0x05145145},
+ {0x433C, 0x05145145},
+ {0x4340, 0x05145145},
+ {0x4344, 0x05145145},
+ {0x4348, 0x090CD243},
+ {0x434C, 0x0918A1C5},
+ {0x4350, 0x071C3143},
+ {0x4354, 0x071431C3},
+ {0x4358, 0x0F3CF1C5},
+ {0x435C, 0x0F3CF3CF},
+ {0x4360, 0x0F3CF3CF},
+ {0x4364, 0x0F3CF3CF},
+ {0x4368, 0x0F3CF3CF},
+ {0x436C, 0x090C91CF},
+ {0x4370, 0x11243143},
+ {0x4374, 0x9777A777},
+ {0x4378, 0xBB7BAC95},
+ {0x437C, 0xB667B889},
+ {0x4380, 0x7B9B8899},
+ {0x4384, 0x7A5567C8},
+ {0x4388, 0x2278CCCC},
+ {0x438C, 0x7C222222},
+ {0x4390, 0x0000049B},
+ {0x470C, 0x00000888},
+ {0x4EB4, 0x00000002},
+ {0x4394, 0x001CCCCC},
+ {0x4710, 0xCCCCCAAC},
+ {0x4714, 0x0000AACC},
+ {0x4398, 0x00000000},
+ {0x439C, 0x00000008},
+ {0x49A4, 0x00000000},
+ {0x43A0, 0x00000000},
+ {0x43A4, 0x00000000},
+ {0x43A8, 0x00000000},
+ {0x43AC, 0x10000000},
+ {0x43B0, 0x00401001},
+ {0x43B4, 0x00061003},
+ {0x4718, 0x00003000},
+ {0x43B8, 0x000024D8},
+ {0x43BC, 0x00000000},
+ {0x43C0, 0x10000020},
+ {0x43C4, 0x20000200},
+ {0x43C8, 0x00000000},
+ {0x43CC, 0x04000000},
+ {0x43D0, 0x44000100},
+ {0x43D4, 0x60804060},
+ {0x43D8, 0x44204210},
+ {0x43DC, 0x82108082},
+ {0x43E0, 0x82108402},
+ {0x43E4, 0xC8082108},
+ {0x43E8, 0xC8202084},
+ {0x43EC, 0x44208208},
+ {0x43F0, 0x84108204},
+ {0x43F4, 0xD0108104},
+ {0x43F8, 0xF8210108},
+ {0x43FC, 0x6431E930},
+ {0x4400, 0x02109468},
+ {0x4404, 0x10C61C22},
+ {0x4408, 0x02109469},
+ {0x440C, 0x10C61C22},
+ {0x4410, 0x00041049},
+ {0x471C, 0x0B02C080},
+ {0x4414, 0x00000000},
+ {0x4418, 0x00000000},
+ {0x441C, 0x80000000},
+ {0x4420, 0xB0200000},
+ {0x4424, 0x00001FF0},
+ {0x4780, 0xEC000000},
+ {0x4784, 0x8C400020},
+ {0x4964, 0x51089104},
+ {0x4968, 0x88448844},
+ {0x496C, 0x07000044},
+ {0x4E4C, 0x00000000},
+ {0x4428, 0x00000000},
+ {0x442C, 0x00000000},
+ {0x4430, 0x00000000},
+ {0x4434, 0x00000000},
+ {0x4438, 0x590642D0},
+ {0x443C, 0x398668A0},
+ {0x4440, 0x6C100808},
+ {0x4444, 0x4A145344},
+ {0x4448, 0x0C5B008F},
+ {0x444C, 0x6E30498A},
+ {0x4450, 0x656E371B},
+ {0x4454, 0x00000F53},
+ {0x49A8, 0x68120000},
+ {0x49AC, 0xDA0681E0},
+ {0x49BC, 0x14060180},
+ {0x49D8, 0x600603FF},
+ {0x49DC, 0x3C502000},
+ {0x49E0, 0x2C580050},
+ {0x49E4, 0x45B055EF},
+ {0x49E8, 0x00000290},
+ {0x4A0C, 0x00000001},
+ {0x4A28, 0x0DAC1B58},
+ {0x4A2C, 0x0000001E},
+ {0x4E50, 0x16878003},
+ {0x4E54, 0x0F00F078},
+ {0x4E58, 0x03C1E0B4},
+ {0x4E5C, 0x78584830},
+ {0x4E60, 0x88C0140C},
+ {0x4E64, 0x90302C24},
+ {0x4E68, 0x0F84A00A},
+ {0x4E6C, 0x00000011},
+ {0x4E78, 0x00003039},
+ {0x4E7C, 0x0000D431},
+ {0x4E80, 0x00008235},
+ {0x4E84, 0x00000000},
+ {0x4E88, 0x000056CE},
+ {0x4E8C, 0x00002B67},
+ {0x4E90, 0x00000237},
+ {0x4EB8, 0x00004624},
+ {0x4A30, 0x00000000},
+ {0x4458, 0x00000000},
+ {0x445C, 0x4801442E},
+ {0x4460, 0x0051A0B8},
+ {0x4A34, 0x0000011F},
+ {0x4EBC, 0x00000000},
+ {0x4A38, 0x0000011F},
+ {0x4EC0, 0x00000000},
+ {0x4464, 0x00000000},
+ {0x4468, 0x00000000},
+ {0x446C, 0x00000000},
+ {0x4470, 0x00000000},
+ {0x4474, 0x00000000},
+ {0x4478, 0x00000000},
+ {0x447C, 0x00000000},
+ {0x4480, 0x2A0AA040},
+ {0x4484, 0x0A886926},
+ {0x4488, 0x00000004},
+ {0x4A3C, 0x00002B1C},
+ {0x448C, 0x00000000},
+ {0x4490, 0x88000000},
+ {0x4494, 0x10000000},
+ {0x4498, 0xE0000000},
+ {0x4A08, 0x00000FE6},
+ {0x4A40, 0x00000000},
+ {0x4A44, 0x00000000},
+ {0x4A48, 0x00000000},
+ {0x4A4C, 0x00000000},
+ {0x4A50, 0x00000000},
+ {0x4A54, 0x00000000},
+ {0x449C, 0x00000019},
+ {0x44A0, 0x02B2E394},
+ {0x44A4, 0x00000400},
+ {0x4A58, 0x14285208},
+ {0x4A84, 0x02850A14},
+ {0x4A88, 0x048D0A14},
+ {0x4A8C, 0x01123401},
+ {0x4A90, 0x34011234},
+ {0x4A94, 0x23450112},
+ {0x4A98, 0x45123451},
+ {0x4AAC, 0x12345123},
+ {0x4AB0, 0x00000000},
+ {0x44A8, 0x00000001},
+ {0x44B0, 0x00000000},
+ {0x44B4, 0x00000000},
+ {0x44B8, 0x00000000},
+ {0x44BC, 0x00000000},
+ {0x44C0, 0x00000000},
+ {0x44C4, 0x00000000},
+ {0x44C8, 0x00000000},
+ {0x44CC, 0x00000000},
+ {0x44D0, 0x00000000},
+ {0x44D4, 0x00000000},
+ {0x44D8, 0x00000000},
+ {0x44DC, 0x00000000},
+ {0x44E0, 0x00000000},
+ {0x44E4, 0x00000000},
+ {0x44E8, 0x00000000},
+ {0x44EC, 0x00000000},
+ {0x44F0, 0x00000000},
+ {0x44F4, 0x00000000},
+ {0x44F8, 0x00000000},
+ {0x44FC, 0x00000000},
+ {0x4500, 0x00000000},
+ {0x4504, 0x00000000},
+ {0x4508, 0x00000000},
+ {0x450C, 0x00000000},
+ {0x4510, 0x00000000},
+ {0x4514, 0x00000000},
+ {0x4518, 0x00000000},
+ {0x451C, 0x00000000},
+ {0x4520, 0x00000000},
+ {0x4524, 0x00000000},
+ {0x4528, 0x00000000},
+ {0x452C, 0x00000000},
+ {0x4530, 0x4ED80C81},
+ {0x4534, 0x00001808},
+ {0x4538, 0x000000FF},
+ {0x453C, 0x00000000},
+ {0x4540, 0x00000000},
+ {0x4544, 0x00000000},
+ {0x4548, 0x00000000},
+ {0x454C, 0x00000000},
+ {0x4550, 0x00000000},
+ {0x4554, 0x00000000},
+ {0x4558, 0x00000000},
+ {0x455C, 0x00000000},
+ {0x4560, 0x40600033},
+ {0x4564, 0x40000000},
+ {0x4568, 0x00000000},
+ {0x456C, 0x20000000},
+ {0x4570, 0x04AAA407},
+ {0x4574, 0x0001A2B4},
+ {0x4578, 0x0002024B},
+ {0x457C, 0x00200000},
+ {0x4580, 0x00001B40},
+ {0x4584, 0x00000000},
+ {0x4588, 0x000000C8},
+ {0x458C, 0x30000000},
+ {0x4590, 0x00000000},
+ {0x4594, 0x00000000},
+ {0x4598, 0x00000001},
+ {0x459C, 0x0003FE00},
+ {0x45A0, 0x00000000},
+ {0x45A4, 0x00000000},
+ {0x45A8, 0xC00002C0},
+ {0x45AC, 0x78028000},
+ {0x45B0, 0x80000048},
+ {0x45B4, 0x00098800},
+ {0x45B8, 0x00200002},
+ {0x4AB4, 0x00000000},
+ {0x4AB8, 0x00000000},
+ {0x4ABC, 0x00000000},
+ {0x4AC0, 0x00000000},
+ {0x4AC4, 0x00000000},
+ {0x4AC8, 0x00000000},
+ {0x4AF4, 0x00000000},
+ {0x4AF8, 0x00000000},
+ {0x4AFC, 0x00000000},
+ {0x4B00, 0x00000000},
+ {0x4B04, 0x00000000},
+ {0x4B08, 0x00000000},
+ {0x4B0C, 0x00000000},
+ {0x4B10, 0x00000000},
+ {0x4B14, 0x00000000},
+ {0x4B18, 0xB0000000},
+ {0x4B1C, 0x00000000},
+ {0x4B20, 0x00000000},
+ {0x4B24, 0x00000000},
+ {0x4B28, 0x00000000},
+ {0x4B2C, 0x00000000},
+ {0x4B30, 0x00000000},
+ {0x4B34, 0x00000000},
+ {0x4B38, 0x00000000},
+ {0x4B3C, 0x00000000},
+ {0x4B40, 0x00000000},
+ {0x45BC, 0x06748790},
+ {0x45C0, 0x80000000},
+ {0x45C4, 0x00000000},
+ {0x45C8, 0x00000000},
+ {0x45CC, 0x00558670},
+ {0x45D0, 0x002883F0},
+ {0x45D4, 0x00090120},
+ {0x45D8, 0x00000000},
+ {0x4B44, 0x00000100},
+ {0x4B48, 0xA6DBC4B1},
+ {0x4B4C, 0x64F624C3},
+ {0x4B50, 0x00D4EF15},
+ {0x49B0, 0x11110F0A},
+ {0x49B4, 0x00000003},
+ {0x49B8, 0x0000000A},
+ {0x4B54, 0xBE9007FF},
+ {0x4B58, 0x00000001},
+ {0x49C0, 0x00000007},
+ {0x49C4, 0x000003D9},
+ {0x4A10, 0x00000001},
+ {0x49C8, 0x002B1CB0},
+ {0x4A00, 0xC0000000},
+ {0x4A04, 0x00001000},
+ {0x4B5C, 0x00000005},
+ {0x4A18, 0x00000007},
+ {0x4B60, 0x00000024},
+ {0x49CC, 0x00000001},
+ {0x49D0, 0x00000010},
+ {0x49D4, 0x00000001},
+ {0x4B64, 0x927FBFBF},
+ {0x4B68, 0x1D07BDD0},
+ {0x4B6C, 0x318A4DEF},
+ {0x4B70, 0x158C5318},
+ {0x4B74, 0x18C5318C},
+ {0x4B78, 0x4E7394EC},
+ {0x4B7C, 0xD9081CE5},
+ {0x4B80, 0x00000001},
+ {0x49EC, 0x00000001},
+ {0x4B84, 0x00000000},
+ {0x4B88, 0x00000000},
+ {0x4B8C, 0x00000000},
+ {0x4B90, 0x00000000},
+ {0x4B94, 0x00000000},
+ {0x4B98, 0x00000000},
+ {0x4B9C, 0x00000000},
+ {0x4BA0, 0x00000000},
+ {0x4BA4, 0x00EA99A2},
+ {0x49F8, 0x0000C4C3},
+ {0x4A1C, 0x00020800},
+ {0x4A20, 0x0002CC00},
+ {0x4BA8, 0x002B6456},
+ {0x45E0, 0x00000000},
+ {0x45E4, 0x00000000},
+ {0x45E8, 0x00E2E1E1},
+ {0x45EC, 0xCBCBB6B6},
+ {0x45F0, 0x59100FCA},
+ {0x4BAC, 0x12CAB6DE},
+ {0x4BB0, 0x00001110},
+ {0x45F4, 0x08882550},
+ {0x45F8, 0x08CC2660},
+ {0x45FC, 0x09102660},
+ {0x4600, 0x00000154},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xE1CB38E8},
+ {0x4660, 0x4A2E1800},
+ {0x4664, 0x6750E462},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xD1B942F4},
+ {0x4660, 0x41250EF4},
+ {0x4664, 0x6750E458},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xE1CB38E8},
+ {0x4660, 0x4A2E1800},
+ {0x4664, 0x6750E462},
+ {0xA0000000, 0x00000000},
+ {0x45DC, 0xE1CB38E8},
+ {0x4660, 0x4A2E1800},
+ {0x4664, 0x6750E462},
+ {0xB0000000, 0x00000000},
+ {0x4668, 0x0E0CFB0A},
+ {0x466C, 0x30100F06},
+ {0x4670, 0x34333333},
+ {0x4674, 0x34343434},
+ {0x4678, 0xC39D38E8},
+ {0x467C, 0x482800E3},
+ {0x4680, 0x5836E46A},
+ {0x4684, 0xFBEBDA00},
+ {0x4688, 0x1A10FF04},
+ {0x468C, 0x282A3000},
+ {0x4690, 0x2A29292A},
+ {0x4694, 0x04FA2A2A},
+ {0x4698, 0xEE0F04D1},
+ {0x469C, 0x89291436},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0xA0000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0xB0000000, 0x00000000},
+ {0x46A4, 0x08D07CFF},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x2212FF14},
+ {0x46AC, 0x60423537},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x4D1E7F14},
+ {0x46AC, 0x60B37C4E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x2212FF14},
+ {0x46AC, 0x60423537},
+ {0xA0000000, 0x00000000},
+ {0x46A8, 0x2212FF14},
+ {0x46AC, 0x60423537},
+ {0xB0000000, 0x00000000},
+ {0x46B0, 0x63666666},
+ {0x46B4, 0x35374425},
+ {0x46B8, 0x25883043},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x3FFFFD63},
+ {0x4724, 0xB58D11FF},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x27795843},
+ {0x4724, 0xB58D11F5},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x27795303},
+ {0x4724, 0xB58D11F5},
+ {0xA0000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x3FFFFD63},
+ {0x4724, 0xB58D11FF},
+ {0xB0000000, 0x00000000},
+ {0x4728, 0x07FFFFFF},
+ {0x472C, 0x0E7893B6},
+ {0x4730, 0xE0399201},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x00000020},
+ {0x4738, 0x8325C500},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x003D4C20},
+ {0x4738, 0x8F25C500},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x003D5420},
+ {0x4738, 0x8725C500},
+ {0xA0000000, 0x00000000},
+ {0x4734, 0x00000020},
+ {0x4738, 0x8325C500},
+ {0xB0000000, 0x00000000},
+ {0x473C, 0x00000B7F},
+ {0x4ACC, 0x000F7D00},
+ {0x4AD0, 0x00000000},
+ {0x4AD4, 0x00000040},
+ {0x4AE4, 0x5379E99E},
+ {0x4AE8, 0x00000744},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0xFBD5B89F},
+ {0x4BB8, 0x99563918},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0x05EBC8AF},
+ {0x4BB8, 0x99543D24},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0xFBD5B89F},
+ {0x4BB8, 0x99563918},
+ {0xA0000000, 0x00000000},
+ {0x4BB4, 0xFBD5B89F},
+ {0x4BB8, 0x99563918},
+ {0xB0000000, 0x00000000},
+ {0x4BBC, 0x12EED5B8},
+ {0x4BC0, 0x80C4542F},
+ {0x4BC4, 0x005A007F},
+ {0x4BC8, 0x40000000},
+ {0x4BCC, 0x40000000},
+ {0x4BD0, 0x00000000},
+ {0x4BD4, 0x40000000},
+ {0x4BD8, 0xC0000000},
+ {0x4BDC, 0x40000000},
+ {0x4BE0, 0x80000000},
+ {0x4BE4, 0xBAAC8000},
+ {0x4BE8, 0x638A88C5},
+ {0x4BEC, 0x00900000},
+ {0x4EAC, 0x00000000},
+ {0x4BF0, 0x00000000},
+ {0x4BF4, 0x00000000},
+ {0x4BF8, 0x00000219},
+ {0x4EC4, 0x00000001},
+ {0x4EE8, 0x00002020},
+ {0x4BFC, 0x00000000},
+ {0x4C00, 0x00000010},
+ {0x4C04, 0x00000001},
+ {0x4C08, 0x00000001},
+ {0x4C0C, 0x00000000},
+ {0x4C10, 0x00000000},
+ {0x4C14, 0x00000151},
+ {0x4C18, 0x00000000},
+ {0x4C1C, 0x00000000},
+ {0x4C20, 0x00000151},
+ {0x4C24, 0x00000498},
+ {0x4C28, 0x00000498},
+ {0x4C2C, 0x00000498},
+ {0x4C30, 0x00000498},
+ {0x4C34, 0x00000498},
+ {0x4C38, 0x00000498},
+ {0x4C3C, 0x00000498},
+ {0x4C40, 0x00000498},
+ {0x4C44, 0x00000000},
+ {0x4C48, 0x00000000},
+ {0x4C4C, 0x00001146},
+ {0x4C50, 0x00000000},
+ {0x4C54, 0x00000000},
+ {0x4C58, 0x00001146},
+ {0x4C5C, 0x00000000},
+ {0x4C60, 0x00000000},
+ {0x4C64, 0xE2E1E1DE},
+ {0x4C68, 0xB6B600B6},
+ {0x4C6C, 0xCACBCBCA},
+ {0x4C70, 0x8091010F},
+ {0x4C74, 0x00000B11},
+ {0x46C8, 0x08882550},
+ {0x46CC, 0x08CC2660},
+ {0x46D0, 0x09102660},
+ {0x46D4, 0x00000154},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xE4CD38E8},
+ {0x4744, 0x4C321B04},
+ {0x4748, 0x6750E466},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xC5AD42F4},
+ {0x4744, 0x412504E8},
+ {0x4748, 0x6850E459},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xE4CD38E8},
+ {0x4744, 0x4C321B04},
+ {0x4748, 0x6750E466},
+ {0xA0000000, 0x00000000},
+ {0x4740, 0xE4CD38E8},
+ {0x4744, 0x4C321B04},
+ {0x4748, 0x6750E466},
+ {0xB0000000, 0x00000000},
+ {0x474C, 0x0E0CFB0A},
+ {0x4750, 0x30100F06},
+ {0x4754, 0x34333333},
+ {0x4758, 0x34343434},
+ {0x475C, 0xC49E38E8},
+ {0x4760, 0x482800E2},
+ {0x4764, 0x5636E466},
+ {0x4768, 0xFBEBDA00},
+ {0x476C, 0x1A10FF04},
+ {0x4770, 0x282A3000},
+ {0x4774, 0x2A29292A},
+ {0x4778, 0x04FA2A2A},
+ {0x477C, 0xEE0F04D1},
+ {0x49F0, 0x89291436},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0xA0000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0xB0000000, 0x00000000},
+ {0x49FC, 0x08D07CFF},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x2212FF14},
+ {0x4A60, 0x60423537},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x4D1E7F14},
+ {0x4A60, 0x60B37C4E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x2212FF14},
+ {0x4A60, 0x60423537},
+ {0xA0000000, 0x00000000},
+ {0x4A5C, 0x2212FF14},
+ {0x4A60, 0x60423537},
+ {0xB0000000, 0x00000000},
+ {0x4A64, 0x63666666},
+ {0x4A68, 0x35374425},
+ {0x4A6C, 0x25883043},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x3FFFFD63},
+ {0x4A78, 0xB58D11FF},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x27795843},
+ {0x4A78, 0xB58D11F5},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x27795303},
+ {0x4A78, 0xB58D11F5},
+ {0xA0000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x3FFFFD63},
+ {0x4A78, 0xB58D11FF},
+ {0xB0000000, 0x00000000},
+ {0x4A7C, 0x07FFFFFF},
+ {0x4A80, 0x0E7893B6},
+ {0x4A9C, 0xE0399201},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x00000020},
+ {0x4AA4, 0x8325C500},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x003D4C20},
+ {0x4AA4, 0x8F25C500},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x003D5420},
+ {0x4AA4, 0x8725C500},
+ {0xA0000000, 0x00000000},
+ {0x4AA0, 0x00000020},
+ {0x4AA4, 0x8325C500},
+ {0xB0000000, 0x00000000},
+ {0x4AA8, 0x00000B7F},
+ {0x4AD8, 0x000F7D00},
+ {0x4ADC, 0x00000000},
+ {0x4AE0, 0x00000040},
+ {0x4AEC, 0x5379E99E},
+ {0x4AF0, 0x00000744},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0xFBD5B89F},
+ {0x4C7C, 0x99563918},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0x07ECC9B0},
+ {0x4C7C, 0x995B4126},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0xFBD5B89F},
+ {0x4C7C, 0x99563918},
+ {0xA0000000, 0x00000000},
+ {0x4C78, 0xFBD5B89F},
+ {0x4C7C, 0x99563918},
+ {0xB0000000, 0x00000000},
+ {0x4C80, 0x12EED5B8},
+ {0x4C84, 0x80C4542F},
+ {0x4C88, 0x005A007F},
+ {0x4C8C, 0x40000000},
+ {0x4C90, 0x40000000},
+ {0x4C94, 0x00000000},
+ {0x4C98, 0x40000000},
+ {0x4C9C, 0xC0000000},
+ {0x4CA0, 0x40000000},
+ {0x4CA4, 0x80000000},
+ {0x4CA8, 0xBAAC8000},
+ {0x4CAC, 0x638A88C5},
+ {0x4CB0, 0x00900000},
+ {0x4EB0, 0x00000000},
+ {0x4CB4, 0x00000000},
+ {0x4CB8, 0x00000000},
+ {0x4CBC, 0x00000219},
+ {0x4EC8, 0x00000001},
+ {0x4EEC, 0x00002020},
+ {0x4CC0, 0x00000000},
+ {0x4CC4, 0x00000010},
+ {0x4CC8, 0x00000001},
+ {0x4CCC, 0x00000001},
+ {0x4CD0, 0x00000000},
+ {0x4CD4, 0x00000000},
+ {0x4CD8, 0x00000151},
+ {0x4CDC, 0x00000000},
+ {0x4CE0, 0x00000000},
+ {0x4CE4, 0x00000151},
+ {0x4CE8, 0x00000498},
+ {0x4CEC, 0x00000498},
+ {0x4CF0, 0x00000498},
+ {0x4CF4, 0x00000498},
+ {0x4CF8, 0x00000498},
+ {0x4CFC, 0x00000498},
+ {0x4D00, 0x00000498},
+ {0x4D04, 0x00000498},
+ {0x4D08, 0x00000000},
+ {0x4D0C, 0x00000000},
+ {0x4D10, 0x00001146},
+ {0x4D14, 0x00000000},
+ {0x4D18, 0x00000000},
+ {0x4D1C, 0x00001146},
+ {0x4788, 0x00000000},
+ {0x478C, 0xA32103FE},
+ {0x4790, 0xB20A7B28},
+ {0x4794, 0xC6A7B14F},
+ {0x4798, 0x000000D3},
+ {0x4D20, 0x00000000},
+ {0x4D24, 0x0C442416},
+ {0x4D28, 0x00000000},
+ {0x479C, 0x009B902A},
+ {0x47A0, 0x009B902A},
+ {0x47A4, 0x98682C18},
+ {0x47A8, 0x6318C4C1},
+ {0x47AC, 0x6248C631},
+ {0x47B0, 0x922A8253},
+ {0x47B4, 0x00000005},
+ {0x4D2C, 0x0008C0C1},
+ {0x47B8, 0x00001759},
+ {0x47BC, 0x4B702400},
+ {0x47C0, 0x831508BA},
+ {0x4A14, 0x000000E9},
+ {0x4D30, 0x00000001},
+ {0x4E94, 0x000000FC},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0x47CC, 0xBBCCBBB3},
+ {0x47D0, 0x57889989},
+ {0x47D4, 0x00000F45},
+ {0x4D34, 0x7BB167AB},
+ {0x4D38, 0xBBBBBB05},
+ {0x4D3C, 0x777777BB},
+ {0x4D40, 0x00015277},
+ {0x47D8, 0x27039CE9},
+ {0x47DC, 0x41414432},
+ {0x47E0, 0x36058342},
+ {0x47E4, 0x00000006},
+ {0x4D44, 0x00000687},
+ {0x47E8, 0x00000001},
+ {0x47EC, 0x00000001},
+ {0x47F0, 0xC7013016},
+ {0x47F4, 0x84413016},
+ {0x47F8, 0x84413016},
+ {0x47FC, 0x8C413016},
+ {0x4800, 0x8C40B028},
+ {0x4804, 0x3140B028},
+ {0x4808, 0x2940B028},
+ {0x480C, 0x8440B028},
+ {0x4810, 0x6318C610},
+ {0x4814, 0x45334753},
+ {0x4818, 0x236A6A88},
+ {0x4D48, 0x8C413016},
+ {0x4D4C, 0xA140B028},
+ {0x4D50, 0x00150A31},
+ {0x481C, 0x576DF814},
+ {0x4820, 0xA08877AC},
+ {0x4824, 0x0000007A},
+ {0x4D54, 0x00001184},
+ {0x4828, 0xBCEB4A14},
+ {0x482C, 0x000A3A4A},
+ {0x4830, 0xBCEB4A14},
+ {0x4834, 0x000A3A4A},
+ {0x4D58, 0x2F63DD3A},
+ {0x4838, 0xBCBDBD85},
+ {0x483C, 0x0CABB99A},
+ {0x4D5C, 0x000000BC},
+ {0x4840, 0x38384242},
+ {0x4844, 0x0086102E},
+ {0x4848, 0xCA24C82A},
+ {0x4D60, 0x00000000},
+ {0x4D64, 0x0000F49D},
+ {0x4ED8, 0x00000001},
+ {0x4D68, 0x000001C4},
+ {0x4D6C, 0x00000000},
+ {0x4D70, 0x38384242},
+ {0x4D74, 0x030E902E},
+ {0x4D78, 0x994C1502},
+ {0x4D7C, 0x00017912},
+ {0x4EDC, 0x00000001},
+ {0x484C, 0x00008A62},
+ {0x4D80, 0x00000002},
+ {0x4850, 0x00000008},
+ {0x4854, 0x009B902A},
+ {0x4858, 0x009B902A},
+ {0x485C, 0x98682C18},
+ {0x4860, 0x6318C4C1},
+ {0x4864, 0x6248C631},
+ {0x4868, 0x922A8253},
+ {0x486C, 0x00000005},
+ {0x4D84, 0x0008C0C1},
+ {0x4870, 0x00001759},
+ {0x4874, 0x4B702400},
+ {0x4878, 0x831508BA},
+ {0x4A24, 0x000000E9},
+ {0x4D88, 0x00000001},
+ {0x4E98, 0x000000FC},
+ {0x487C, 0x9898A8BB},
+ {0x4880, 0x54535368},
+ {0x4884, 0x999999B3},
+ {0x4888, 0x35555589},
+ {0x488C, 0x00000745},
+ {0x4D8C, 0x6AB14487},
+ {0x4D90, 0xBBBBBB04},
+ {0x4D94, 0x777777BB},
+ {0x4D98, 0x00015277},
+ {0x4890, 0x27039CE9},
+ {0x4894, 0x41414432},
+ {0x4898, 0x36058342},
+ {0x489C, 0x00000006},
+ {0x4D9C, 0x00000687},
+ {0x48A0, 0x00000001},
+ {0x48A4, 0x00000001},
+ {0x48A8, 0xC7013016},
+ {0x48AC, 0x84413016},
+ {0x48B0, 0x84413016},
+ {0x48B4, 0x8C413016},
+ {0x48B8, 0x8C40B028},
+ {0x48BC, 0x3140B028},
+ {0x48C0, 0x2940B028},
+ {0x48C4, 0x8440B028},
+ {0x48C8, 0x6318C610},
+ {0x48CC, 0x45334753},
+ {0x48D0, 0x236A6A88},
+ {0x4DA0, 0x8C413016},
+ {0x4DA4, 0xA140B028},
+ {0x4DA8, 0x00150A31},
+ {0x48D4, 0x576DF814},
+ {0x48D8, 0xA08877AC},
+ {0x48DC, 0x0000007A},
+ {0x4DAC, 0x00001184},
+ {0x48E0, 0xBCEB4A14},
+ {0x48E4, 0x000A3A4A},
+ {0x48E8, 0xBCEB4A14},
+ {0x48EC, 0x000A3A4A},
+ {0x4DB0, 0x2F63DD3A},
+ {0x48F0, 0x9A8A8A85},
+ {0x48F4, 0x0C9BB99A},
+ {0x4DB4, 0x0000009A},
+ {0x48F8, 0x38384242},
+ {0x48FC, 0x0086102E},
+ {0x4900, 0xCA24C82A},
+ {0x4DB8, 0x00000000},
+ {0x4DBC, 0x0000F49D},
+ {0x4EE0, 0x00000001},
+ {0x4DC0, 0x000001C4},
+ {0x4DC4, 0x00000000},
+ {0x4DC8, 0x38384242},
+ {0x4DCC, 0x030E902E},
+ {0x4DD0, 0x994C1502},
+ {0x4DD4, 0x00017912},
+ {0x4EE4, 0x00000001},
+ {0x4904, 0x00008A62},
+ {0x4DD8, 0x00000002},
+ {0x4908, 0x00000008},
+ {0x490C, 0x80040000},
+ {0x4910, 0x80040000},
+ {0x4914, 0xFE800000},
+ {0x4918, 0x834C0000},
+ {0x491C, 0x00000000},
+ {0x4920, 0x00000000},
+ {0x4924, 0x000003FF},
+ {0x4928, 0x00000000},
+ {0x492C, 0x00000000},
+ {0x4930, 0x00000000},
+ {0x4934, 0x40000000},
+ {0x4938, 0x00000000},
+ {0x493C, 0x00000000},
+ {0x4940, 0x00000000},
+ {0x4944, 0x00000000},
+ {0x4948, 0x04065800},
+ {0x494C, 0x02010080},
+ {0x4950, 0x0E1E3E05},
+ {0x4954, 0x0A163068},
+ {0x4958, 0x00206040},
+ {0x495C, 0x02020202},
+ {0x4960, 0x00002020},
+ {0x4DDC, 0x18002000},
+ {0x4DE0, 0x00004001},
+ {0x4DE4, 0x00040004},
+ {0x4DE8, 0x00400040},
+ {0x4DEC, 0x04000400},
+ {0x4DF0, 0x08080618},
+ {0x4DF4, 0x08081616},
+ {0x4DF8, 0x08080808},
+ {0x4DFC, 0x18180808},
+ {0x4E00, 0x01020100},
+ {0x4E04, 0x05020502},
+ {0x4E08, 0x00020E0F},
+ {0x4E0C, 0x00000000},
+ {0x4E10, 0x16080806},
+ {0x4E14, 0x08080816},
+ {0x4E18, 0x08080808},
+ {0x4E1C, 0x00181808},
+ {0x4E20, 0x02010201},
+ {0x4E24, 0x0F050205},
+ {0x4E28, 0x0000020E},
+ {0x4E2C, 0x00000000},
+ {0x4E70, 0x00000001},
+ {0x4970, 0x00000000},
+ {0x4974, 0xC00CD62D},
+ {0x4978, 0x00000103},
+ {0x4E30, 0x02E416A8},
+ {0x497C, 0x00000000},
+ {0x4980, 0x00000000},
+ {0x4984, 0x00000000},
+ {0x4988, 0x00000000},
+ {0x498C, 0x00000000},
+ {0x4E34, 0x00FC0000},
+ {0x4E38, 0x0000F800},
+ {0x4E3C, 0x00000001},
+ {0x4990, 0x00000000},
+ {0x4994, 0x00000000},
+ {0x4998, 0x00000000},
+ {0x499C, 0x00000000},
+ {0x49A0, 0x00000000},
+ {0x4E40, 0x00FC0000},
+ {0x4E44, 0x0000F800},
+ {0x4E48, 0x00000001},
+ {0xC54, 0x10014368},
+ {0xC58, 0x61000000},
+ {0xC5C, 0x805580F0},
+ {0xC64, 0x0010A030},
+ {0x189C, 0x000003FF},
+ {0xC6C, 0x00060020},
+ {0xC3C, 0x2840E1BF},
+ {0xC40, 0x00000000},
+ {0xC44, 0x00000007},
+ {0xC48, 0x410E4000},
+ {0xC54, 0x1EE1436A},
+ {0xC58, 0x61000000},
+ {0x730, 0x00000002},
+ {0xC60, 0x017FFFF2},
+ {0xC64, 0x0010A170},
+ {0xC64, 0x0010A170},
+ {0xC68, 0x000000FF},
+ {0xC64, 0x0010A130},
+ {0xC54, 0x1AE1436A},
+ {0xC6C, 0x00060020},
+ {0xC58, 0x41000000},
+ {0x708, 0x00000000},
+ {0xC6C, 0x00061020},
+ {0x884, 0x0043F01D},
+ {0x704, 0x601E0100},
+ {0x710, 0xEF810000},
+ {0xC54, 0x1AE1436A},
+ {0xC58, 0x41000000},
+ {0xC68, 0x10000050},
+ {0xC6C, 0x20061020},
+ {0x704, 0x601E0100},
+ {0xC74, 0x00000000},
+ {0x90C, 0x00300000},
+ {0xC70, 0x071BFC00},
+ {0xC74, 0x3FFFFFFF},
+ {0xC78, 0x3FFFFFFF},
+ {0xC7C, 0x0000BFFF},
+ {0xD40, 0xF64FA0F7},
+ {0xD44, 0x0400463F},
+ {0xD48, 0x0003FFFF},
+ {0xD4C, 0x00000000},
+ {0xD50, 0xF64FA0F7},
+ {0xD54, 0x04100437},
+ {0xD58, 0x0000FF7F},
+ {0xD5C, 0x00000000},
+ {0xD60, 0x00000000},
+ {0xD64, 0x00000000},
+ {0xD70, 0x00000015},
+ {0xD90, 0x000003FF},
+ {0xD94, 0x00000000},
+ {0xD98, 0x0000003F},
+ {0xD9C, 0x00000000},
+ {0xDA0, 0x000003FE},
+ {0xDA4, 0x00000000},
+ {0xDA8, 0x0000003F},
+ {0xDAC, 0x00000000},
+ {0xD00, 0x77777777},
+ {0xD04, 0xBBBBBBBB},
+ {0xD08, 0xBBBBBBBB},
+ {0xD0C, 0x00000070},
+ {0xD10, 0x20110900},
+ {0xD10, 0x20110FFF},
+ {0xD78, 0x00000001},
+ {0xD7C, 0x001C040A},
+ {0xD84, 0x00006007},
+ {0xD84, 0x00006607},
+ {0xD10, 0x28110FFF},
+ {0xD18, 0x50209900},
+ {0xD80, 0x00804100},
+ {0xD80, 0x00804200},
+ {0x718, 0x1333233F},
+ {0x604, 0x041E1E1E},
+ {0x714, 0x00010000},
+ {0x586C, 0x000000F0},
+ {0x586C, 0x000000E0},
+ {0x586C, 0x000000D0},
+ {0x586C, 0x000000C0},
+ {0x586C, 0x000000B0},
+ {0x586C, 0x000000A0},
+ {0x586C, 0x00000090},
+ {0x586C, 0x00000080},
+ {0x586C, 0x00000070},
+ {0x586C, 0x00000060},
+ {0x586C, 0x00000050},
+ {0x586C, 0x00000040},
+ {0x586C, 0x00000030},
+ {0x586C, 0x00000020},
+ {0x586C, 0x00000010},
+ {0x586C, 0x00000000},
+ {0x786C, 0x000000F0},
+ {0x786C, 0x000000E0},
+ {0x786C, 0x000000D0},
+ {0x786C, 0x000000C0},
+ {0x786C, 0x000000B0},
+ {0x786C, 0x000000A0},
+ {0x786C, 0x00000090},
+ {0x786C, 0x00000080},
+ {0x786C, 0x00000070},
+ {0x786C, 0x00000060},
+ {0x786C, 0x00000050},
+ {0x786C, 0x00000040},
+ {0x786C, 0x00000030},
+ {0x786C, 0x00000020},
+ {0x786C, 0x00000010},
+ {0x786C, 0x00000000},
+ {0x304, 0x0CE31333},
+ {0x300, 0xF30CE31C},
+ {0x304, 0x13EF1F19},
+ {0x308, 0x0C13E3F3},
+ {0x30C, 0x130C0C0C},
+ {0x310, 0x80496000},
+ {0x314, 0x0041E000},
+ {0x318, 0x20022042},
+ {0x31C, 0x20448009},
+ {0x320, 0x00490040},
+ {0x324, 0xE0000070},
+ {0x328, 0xE000E000},
+ {0x32C, 0x0041E000},
+ {0x35C, 0x000004C4},
+ {0xC0D4, 0xA7C41460},
+ {0xC0D8, 0xC6BA7F67},
+ {0xC0DC, 0x30C52868},
+ {0xC0E0, 0x75008128},
+ {0xC0E4, 0x0000272B},
+ {0xC1D4, 0xA7C41460},
+ {0xC1D8, 0xC6BA7F67},
+ {0xC1DC, 0x30C52868},
+ {0xC1E0, 0x75008128},
+ {0xC1E4, 0x0000272B},
+ {0xC0EC, 0x00030003},
+ {0xC1EC, 0x00030003},
+ {0xC004, 0x03020000},
+ {0xC024, 0x03020000},
+ {0xC104, 0x03020000},
+ {0xC124, 0x03020000},
+ {0xC0E8, 0x000A0C81},
+ {0xC0F0, 0x00000024},
+ {0xC1E8, 0x000A0C81},
+ {0xC1F0, 0x00000024},
+ {0x334, 0xFFFFFFFF},
+ {0x33C, 0x55000000},
+ {0x340, 0x00005555},
+ {0x724, 0x00111201},
+ {0x5868, 0xA9550000},
+ {0x5870, 0x33221100},
+ {0x5874, 0x77665544},
+ {0x5878, 0xBBAA9988},
+ {0x587C, 0xFFEEDDCC},
+ {0x5880, 0x76543210},
+ {0x5884, 0xFEDCBA98},
+ {0x5888, 0x00000000},
+ {0x588C, 0x00000000},
+ {0x5894, 0x00000008},
+ {0x7868, 0xA9550000},
+ {0x7870, 0x33221100},
+ {0x7874, 0x77665544},
+ {0x7878, 0xBBAA9988},
+ {0x787C, 0xFFEEDDCC},
+ {0x7880, 0x76543210},
+ {0x7884, 0xFEDCBA98},
+ {0x7888, 0x00000000},
+ {0x788C, 0x00000000},
+ {0x7894, 0x00000008},
+ {0x650, 0x00200888},
+ {0x710, 0xF3810000},
+ {0x020, 0x0000F381},
+ {0x024, 0x0000F381},
+ {0xC0A8, 0x00000080},
+ {0xC0AC, 0x00000100},
+ {0xC0B8, 0x00020000},
+ {0xC1A8, 0x00000080},
+ {0xC1AC, 0x00000100},
+ {0xC1B8, 0x00020000},
+ {0x1038, 0x00003100},
+ {0x1038, 0x00003100},
+ {0x3038, 0x00003100},
+ {0x3038, 0x00003100},
+ {0xC14, 0xA5000000},
+ {0x908, 0x00000001},
+ {0xC54, 0x1EE14368},
+ {0xC88, 0xC2AC8000},
+ {0xC8C, 0x02F2FC08},
+ {0xC70, 0x071BFC00},
+ {0x980, 0x10002251},
+ {0x988, 0x3C3C4107},
+ {0x904, 0x00000005},
+ {0x994, 0x00000010},
+ {0x000, 0x0580801F},
+ {0x240C, 0x00000000},
+ {0x010, 0x000C01FF},
+ {0x010, 0x001C01FF},
+ {0x2424, 0x00000008},
+ {0x620, 0x00141A30},
+ {0x660, 0x00000004},
+ {0x2620, 0x00141A30},
+ {0x2660, 0x00000000},
+ {0x640, 0x180A141E},
+ {0x640, 0x1814141E},
+ {0x640, 0x1814141E},
+ {0x640, 0x14141414},
+ {0x644, 0x3C14283C},
+ {0x644, 0x3C29283C},
+ {0x644, 0x3C29203C},
+ {0x644, 0x3C29201A},
+ {0x2640, 0x180A141E},
+ {0x2640, 0x1814141E},
+ {0x2640, 0x1814141E},
+ {0x2640, 0x14141414},
+ {0x2644, 0x3C14283C},
+ {0x2644, 0x3C29283C},
+ {0x2644, 0x3C29203C},
+ {0x2644, 0x3C29201A},
+ {0x620, 0x00141A40},
+ {0x64C, 0x1D0A141E},
+ {0x64C, 0x1D1D141E},
+ {0x64C, 0x1D1D1D1E},
+ {0x2620, 0x00141A40},
+ {0x264C, 0x1D0A141E},
+ {0x264C, 0x1D1D141E},
+ {0x264C, 0x1D1D1D1E},
+ {0x2300, 0x03020100},
+ {0x2304, 0x07060504},
+ {0x2308, 0x0B0A0908},
+ {0x230C, 0x0F0E0D0C},
+ {0x2310, 0x13121110},
+ {0x2314, 0x17161514},
+ {0x2318, 0x00000018},
+ {0x231C, 0x00C00000},
+ {0x2320, 0x00000000},
+ {0x2324, 0x0005298F},
+ {0x2328, 0x0015296E},
+ {0x232C, 0x0D3B5200},
+ {0x2330, 0x00000000},
+ {0x2334, 0x00000000},
+ {0x2338, 0x00000000},
+ {0x233C, 0x00000402},
+ {0x2340, 0x00020080},
+ {0x2344, 0x03C00000},
+ {0x2348, 0x0001FFFF},
+ {0x234C, 0x00C80064},
+ {0x2350, 0x0190012C},
+ {0x2354, 0x000032FE},
+ {0x2358, 0xF0203C28},
+ {0x235C, 0xF027C000},
+ {0x2360, 0x01210C00},
+ {0x2320, 0x00000001},
+ {0x2300, 0x0C811B40},
+ {0x2304, 0xF3FC4ED8},
+ {0x2308, 0x08FF808F},
+ {0x230C, 0xFCBC80C8},
+ {0x2310, 0xBC80536C},
+ {0x2314, 0x0363A0F3},
+ {0x2318, 0x000000BB},
+ {0x724, 0x00111200},
+ {0x704, 0x601E0D00},
+ {0xC78, 0xBFFFFFFF},
+ {0x704, 0x601E0D02},
+ {0x704, 0x601E0D02},
+ {0x5864, 0x080801FF},
+ {0x7864, 0x080801FF},
+ {0xC60, 0x017FFFF3},
+ {0xC6C, 0x20061021},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x8088, 0x007F0000},
+ {0x81A4, 0x003F3A00},
+ {0x81B4, 0x0100007F},
+ {0x81C0, 0x0060010B},
+ {0x81A0, 0x00000010},
+ {0x8138, 0x40000002},
+ {0x82A4, 0x003F3A00},
+ {0x82B4, 0x0100007F},
+ {0x82C0, 0x0060010B},
+ {0x82A0, 0x00000010},
+ {0x81A0, 0x00000010},
+ {0x8238, 0x40000002},
+ {0x8088, 0x00000000},
+ {0x8020, 0x00000000},
+ {0x8120, 0x00000000},
+ {0x8220, 0x00000000},
+ {0x8124, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0xC60, 0x017FFFF3},
+ {0xC70, 0x071BFE00},
+ {0xC70, 0x071BFE60},
+ {0xC6C, 0x20061021},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x8120, 0x10000000},
+ {0x8120, 0x10030000},
+ {0x8124, 0x00000F0F},
+ {0x8124, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x8220, 0x10000000},
+ {0x8220, 0x10030000},
+ {0x704, 0x601E0D00},
+ {0x5864, 0x100801FF},
+ {0x7864, 0x100801FF},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0xC60, 0x017FFFF3},
+ {0x58D4, 0x7401FE00},
+ {0x78D4, 0x7401FE00},
+ {0x58F0, 0x400401FF},
+ {0x78F0, 0x400401FF},
+ {0x58F0, 0x400401FF},
+ {0x78F0, 0x400401FF},
+ {0x704, 0x601E0D02},
+ {0xC7C, 0x0020BFFF},
+ {0x58C0, 0x00FE0000},
+ {0x58FC, 0x00000000},
+ {0x566C, 0x00010005},
+ {0x566C, 0x00011005},
+ {0x700, 0x00000030},
+ {0x9D0, 0x00001001},
+ {0x704, 0x601E0D02},
+ {0x704, 0x601E0D00},
+ {0x704, 0x601C0502},
+ {0x000, 0x0580801F},
+ {0x980, 0x10002250},
+ {0x010, 0x001C01FF},
+ {0xC3C, 0x2840E1BF},
+ {0x12A8, 0x33337824},
+ {0x32A8, 0x33337824},
+ {0x620, 0x00141A40},
+ {0x2320, 0x00000000},
+ {0x664, 0x0000000C},
+ {0xC0F8, 0x00000001},
+ {0xC1F8, 0x00000001},
+ {0x2D7C, 0x739C040A},
+ {0x1010, 0x00000000},
+ {0x3010, 0x00000000},
+ {0x2C14, 0x80000005},
+ {0x5818, 0x082C1800},
+ {0x7818, 0x082C1800},
+ {0x624, 0x0101030A},
+ {0x028, 0x0000F381},
+ {0x02C, 0x0000F381},
+ {0x720, 0x20000000},
+ {0x1200, 0x00010142},
+ {0x12A0, 0x24903056},
+ {0x12AC, 0x12333121},
+ {0x12B8, 0x30020000},
+ {0x2000, 0x18BBBF84},
+ {0x2C14, 0x85000005},
+ {0x3200, 0x00010142},
+ {0x32A0, 0x24903056},
+ {0x32AC, 0x12333121},
+ {0x32B8, 0x30020000},
+ {0x5800, 0x03FF807F},
+ {0x5804, 0x04237040},
+ {0x5808, 0x04237040},
+ {0x7800, 0x03FF807F},
+ {0x7804, 0x04237040},
+ {0x7808, 0x04237040},
+ {0x010, 0x001C61FF},
+ {0x56C8, 0x0E800400},
+ {0x76C8, 0x0E800400},
+ {0x984, 0x000000E0},
+ {0x2008, 0x000FFFFF},
+ {0x58B0, 0x00000800},
+ {0x5A00, 0x00000000},
+ {0x5A04, 0x00000000},
+ {0x5A08, 0x00000000},
+ {0x5A0C, 0x00000000},
+ {0x5A10, 0x00000000},
+ {0x5A14, 0x00000000},
+ {0x5A18, 0x00000000},
+ {0x5A1C, 0x00000000},
+ {0x5A20, 0x00000000},
+ {0x5A24, 0x00050000},
+ {0x5A28, 0x00000000},
+ {0x5A2C, 0x00000000},
+ {0x5A30, 0x00000000},
+ {0x5A34, 0x00000000},
+ {0x5A38, 0x00000000},
+ {0x5A3C, 0x00000000},
+ {0x5A40, 0x00000000},
+ {0x5A44, 0x00000005},
+ {0x5A48, 0x00000000},
+ {0x5A4C, 0x00000000},
+ {0x5A50, 0x00000000},
+ {0x5A54, 0x00000000},
+ {0x5A58, 0x00000000},
+ {0x5A5C, 0x00000000},
+ {0x5A60, 0x00050000},
+ {0x5A64, 0x00000000},
+ {0x5A68, 0x00000000},
+ {0x5A6C, 0x00000000},
+ {0x5A70, 0x00000000},
+ {0x5A74, 0x00000000},
+ {0x5A78, 0x00000000},
+ {0x5A7C, 0x00000000},
+ {0x5A80, 0x00000000},
+ {0x5A84, 0x00000000},
+ {0x5A88, 0x00000000},
+ {0x5A8C, 0x00000000},
+ {0x5A90, 0x00000000},
+ {0x5A94, 0x00000000},
+ {0x5A98, 0x00000000},
+ {0x5A9C, 0x00000000},
+ {0x5AA0, 0x00000000},
+ {0x5AA4, 0x00000000},
+ {0x5AA8, 0x00000000},
+ {0x5AAC, 0x00000000},
+ {0x5AB0, 0x00050005},
+ {0x5AB4, 0x00050005},
+ {0x5AB8, 0x00050005},
+ {0x5ABC, 0x00050005},
+ {0x5AC0, 0x00000005},
+ {0x78B0, 0x00000800},
+ {0x7A00, 0x00000000},
+ {0x7A04, 0x00000000},
+ {0x7A08, 0x00000000},
+ {0x7A0C, 0x00000000},
+ {0x7A10, 0x00000000},
+ {0x7A14, 0x00000000},
+ {0x7A18, 0x00000000},
+ {0x7A1C, 0x00000000},
+ {0x7A20, 0x00000000},
+ {0x7A24, 0x00050000},
+ {0x7A28, 0x00000000},
+ {0x7A2C, 0x00000000},
+ {0x7A30, 0x00000000},
+ {0x7A34, 0x00000000},
+ {0x7A38, 0x00000000},
+ {0x7A3C, 0x00000000},
+ {0x7A40, 0x00000000},
+ {0x7A44, 0x00000005},
+ {0x7A48, 0x00000000},
+ {0x7A4C, 0x00000000},
+ {0x7A50, 0x00000000},
+ {0x7A54, 0x00000000},
+ {0x7A58, 0x00000000},
+ {0x7A5C, 0x00000000},
+ {0x7A60, 0x00050000},
+ {0x7A64, 0x00000000},
+ {0x7A68, 0x00000000},
+ {0x7A6C, 0x00000000},
+ {0x7A70, 0x00000000},
+ {0x7A74, 0x00000000},
+ {0x7A78, 0x00000000},
+ {0x7A7C, 0x00000000},
+ {0x7A80, 0x00000000},
+ {0x7A84, 0x00000000},
+ {0x7A88, 0x00000000},
+ {0x7A8C, 0x00000000},
+ {0x7A90, 0x00000000},
+ {0x7A94, 0x00000000},
+ {0x7A98, 0x00000000},
+ {0x7A9C, 0x00000000},
+ {0x7AA0, 0x00000000},
+ {0x7AA4, 0x00000000},
+ {0x7AA8, 0x00000000},
+ {0x7AAC, 0x00000000},
+ {0x7AB0, 0x00050005},
+ {0x7AB4, 0x00050005},
+ {0x7AB8, 0x00050005},
+ {0x7ABC, 0x00050005},
+ {0x7AC0, 0x00000005},
+ {0x0F0, 0x00010000},
+ {0x0F4, 0x00000018},
+ {0x0F8, 0x20220120},
+};
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = {
+ {0xF0FF0000, 0x00000000},
+ {0xF03300FF, 0x00000001},
+ {0x000, 0x01E3C39F},
+ {0x001, 0x00694727},
+ {0x002, 0x00005536},
+ {0x100, 0x02E3C39F},
+ {0x101, 0x0069472A},
+ {0x102, 0x00005536},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10000, 0x1A02E1C9},
+ {0x10001, 0x00644A30},
+ {0x10002, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10000, 0x0EF4D1B9},
+ {0x10001, 0x00584125},
+ {0x10002, 0x00006750},
+ {0xA0000000, 0x00000000},
+ {0x10000, 0x1A02E1C9},
+ {0x10001, 0x00644A30},
+ {0x10002, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10100, 0x1901E1C8},
+ {0x10101, 0x0061482D},
+ {0x10102, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10100, 0x04E8C5AD},
+ {0x10101, 0x00594125},
+ {0x10102, 0x00006850},
+ {0xA0000000, 0x00000000},
+ {0x10100, 0x1901E1C8},
+ {0x10101, 0x0061482D},
+ {0x10102, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20000, 0x1601E2CA},
+ {0x20001, 0x005D452A},
+ {0x20002, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20000, 0x0EF4D3BB},
+ {0x20001, 0x00563F25},
+ {0x20002, 0x00006850},
+ {0xA0000000, 0x00000000},
+ {0x20000, 0x1601E2CA},
+ {0x20001, 0x005D452A},
+ {0x20002, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20100, 0x1901E1C8},
+ {0x20101, 0x0061482D},
+ {0x20102, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20100, 0x0BF1CFB7},
+ {0x20101, 0x00574025},
+ {0x20102, 0x00006750},
+ {0xA0000000, 0x00000000},
+ {0x20100, 0x1901E1C8},
+ {0x20101, 0x0061482D},
+ {0x20102, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30000, 0x1700E1CA},
+ {0x30001, 0x005E472B},
+ {0x30002, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30000, 0x05EFCEB7},
+ {0x30001, 0x004B351A},
+ {0x30002, 0x00006850},
+ {0xA0000000, 0x00000000},
+ {0x30000, 0x1700E1CA},
+ {0x30001, 0x005E472B},
+ {0x30002, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30100, 0x14FEE0C9},
+ {0x30101, 0x00594428},
+ {0x30102, 0x00006650},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30100, 0x0CF2D1B9},
+ {0x30101, 0x00563F24},
+ {0x30102, 0x00006750},
+ {0xA0000000, 0x00000000},
+ {0x30100, 0x14FEE0C9},
+ {0x30101, 0x00594428},
+ {0x30102, 0x00006650},
+ {0xB0000000, 0x00000000},
+ {0x40000, 0x13FCDDC8},
+ {0x40001, 0x005D4328},
+ {0x40002, 0x00006850},
+ {0x40100, 0x14FEE3CF},
+ {0x40101, 0x00583E24},
+ {0x40102, 0x00006850},
+ {0x50000, 0x0DF4D6C6},
+ {0x50001, 0x00604227},
+ {0x50002, 0x00006850},
+ {0x50100, 0x1903E7D5},
+ {0x50101, 0x0061462B},
+ {0x50102, 0x00006850},
+ {0x60000, 0x0FF5D7C6},
+ {0x60001, 0x005D4429},
+ {0x60002, 0x00006850},
+ {0x60100, 0x12FADECF},
+ {0x60101, 0x005B4126},
+ {0x60102, 0x00006850},
+ {0x70000, 0x09F1D2C3},
+ {0x70001, 0x00554026},
+ {0x70002, 0x00006750},
+ {0x70100, 0x0CF5DACC},
+ {0x70101, 0x00563E25},
+ {0x70102, 0x00006750},
+ {0x2000000, 0x02E4C4A0},
+ {0x2000001, 0x006A4828},
+ {0x2000100, 0x02E4C5A1},
+ {0x2000101, 0x00664629},
+ {0x2010000, 0x05EBC8AF},
+ {0x2010001, 0x00543D24},
+ {0x2010100, 0x07ECC9B0},
+ {0x2010101, 0x005B4126},
+ {0x2020000, 0x05EDCCB2},
+ {0x2020001, 0x004D361C},
+ {0x2020100, 0x06ECCBB2},
+ {0x2020101, 0x00553D22},
+ {0x2030000, 0x02ECCCB3},
+ {0x2030001, 0x00483118},
+ {0x2030100, 0x04ECCCB2},
+ {0x2030101, 0x004F381C},
+ {0x3000000, 0x00000000},
+ {0x3000001, 0x00000000},
+ {0x3000002, 0x00000000},
+ {0x3000003, 0x00000000},
+ {0x3000100, 0x00000000},
+ {0x3000101, 0x00000000},
+ {0x3000102, 0x00000000},
+ {0x3000103, 0x00000000},
+ {0x3010000, 0x0E0CFB0A},
+ {0x3010001, 0x00100F06},
+ {0x3010002, 0x34333333},
+ {0x3010003, 0x3434343C},
+ {0x3010100, 0x0E0CFB0A},
+ {0x3010101, 0x00100F06},
+ {0x3010102, 0x34333333},
+ {0x3010103, 0x3434343C},
+ {0x3020000, 0x0E0CFB0A},
+ {0x3020001, 0x00100F06},
+ {0x3020002, 0x34333333},
+ {0x3020003, 0x3434343C},
+ {0x3020100, 0x0E0CFB0A},
+ {0x3020101, 0x00100F06},
+ {0x3020102, 0x34333333},
+ {0x3020103, 0x3434343C},
+ {0x3030000, 0x0E0CFB0A},
+ {0x3030001, 0x00100F06},
+ {0x3030002, 0x34333333},
+ {0x3030003, 0x3434343C},
+ {0x3030100, 0x0E0CFB0A},
+ {0x3030101, 0x00100F06},
+ {0x3030102, 0x34333333},
+ {0x3030103, 0x3434343C},
+ {0x3040000, 0x0E0CFB0A},
+ {0x3040001, 0x00100F06},
+ {0x3040002, 0x343B3333},
+ {0x3040003, 0x34343C3C},
+ {0x3040100, 0x0E0CFB0A},
+ {0x3040101, 0x00100F06},
+ {0x3040102, 0x343B3333},
+ {0x3040103, 0x34343C3C},
+ {0x3050000, 0x0E0CFB0A},
+ {0x3050001, 0x00100F06},
+ {0x3050002, 0x343B3333},
+ {0x3050003, 0x34343C3C},
+ {0x3050100, 0x0E0CFB0A},
+ {0x3050101, 0x00100F06},
+ {0x3050102, 0x343B3333},
+ {0x3050103, 0x34343C3C},
+ {0x3060000, 0x0E0CFB0A},
+ {0x3060001, 0x00100F06},
+ {0x3060002, 0x3C3B3333},
+ {0x3060003, 0x34343C3C},
+ {0x3060100, 0x0E0CFB0A},
+ {0x3060101, 0x00100F06},
+ {0x3060102, 0x3C3B3333},
+ {0x3060103, 0x34343C3C},
+ {0x3070000, 0x0E0CFB0A},
+ {0x3070001, 0x00100F06},
+ {0x3070002, 0x3C3B3333},
+ {0x3070003, 0x34343C3C},
+ {0x3070100, 0x0E0CFB0A},
+ {0x3070101, 0x00100F06},
+ {0x3070102, 0x3C3B3333},
+ {0x3070103, 0x34343C3C},
+};
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0020000, 0x00000001},
+ {0xF0320000, 0x00000002},
+ {0xF0330000, 0x00000003},
+ {0xF0340000, 0x00000004},
+ {0xF0350000, 0x00000005},
+ {0xF0360000, 0x00000006},
+ {0xF0010001, 0x00000007},
+ {0xF0020001, 0x00000008},
+ {0xF0320001, 0x00000009},
+ {0xF0330001, 0x0000000A},
+ {0xF0340001, 0x0000000B},
+ {0xF0350001, 0x0000000C},
+ {0xF0360001, 0x0000000D},
+ {0xF03F0001, 0x0000000E},
+ {0xF0400001, 0x0000000F},
+ {0x005, 0x00000000},
+ {0x10005, 0x00000000},
+ {0x000, 0x00030001},
+ {0x10000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x10018, 0x00011124},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000001},
+ {0x03E, 0x00000620},
+ {0x03F, 0x0000020C},
+ {0x0EF, 0x00000000},
+ {0x05F, 0x00000032},
+ {0x097, 0x00043200},
+ {0x0A6, 0x00066DB7},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x00000003},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x00000002},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x0EF, 0x00000000},
+ {0x000, 0x00033C01},
+ {0x10000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x0FE, 0x00000000},
+ {0x096, 0x00015200},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0xA0000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0xB0000000, 0x00000000},
+ {0x057, 0x0000D589},
+ {0x05A, 0x0007FFFF},
+ {0x043, 0x00005000},
+ {0x0B5, 0x00001720},
+ {0x0ED, 0x00000080},
+ {0x033, 0x00000000},
+ {0x03E, 0x00013FAB},
+ {0x03F, 0x000FD800},
+ {0x033, 0x00000010},
+ {0x03E, 0x00013FAB},
+ {0x03F, 0x000FD800},
+ {0x033, 0x00000020},
+ {0x03E, 0x00013FAB},
+ {0x03F, 0x000FD800},
+ {0x0ED, 0x00000000},
+ {0x0ED, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x000000FA},
+ {0x033, 0x00000001},
+ {0x03F, 0x000000F2},
+ {0x033, 0x00000002},
+ {0x03F, 0x000000EA},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000E2},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000DA},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000D2},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x0B9, 0x00020440},
+ {0x018, 0x00001001},
+ {0x10018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x10002, 0x0000000D},
+ {0x0EE, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x0EE, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0xA0000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0xB0000000, 0x00000000},
+ {0x0EB, 0x00000000},
+ {0x030, 0x000109B0},
+ {0x030, 0x000189B0},
+ {0x0EB, 0x00000000},
+ {0x0EE, 0x00000010},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000001},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000017},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000026},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000027},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000028},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000030},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000036},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000037},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000060},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000066},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000067},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000068},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000006E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000006F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000070},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000076},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000077},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000078},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000007E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000007F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000FF},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000120},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000126},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000127},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000128},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000012E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000012F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000130},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000136},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000137},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000160},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000166},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000167},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000168},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000016E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000016F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000170},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000176},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000177},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000178},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000017E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000017F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001FF},
+ {0x03F, 0x00000003},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000008},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000009},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000AFFF},
+ {0x033, 0x0000000C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000010},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000011},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000012},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000013},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000014},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000017},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000018},
+ {0x03F, 0x0000FBFF},
+ {0x033, 0x00000019},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000020},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000021},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000022},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000023},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000024},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000025},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000026},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000027},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000028},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000029},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000030},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000031},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000032},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000033},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000034},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000035},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000036},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000037},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000038},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000039},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000003A},
+ {0x03F, 0x0000EFFF},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000040},
+ {0x033, 0x00000000},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000001},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000002},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000003},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000004},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000005},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000006},
+ {0x03F, 0x00004324},
+ {0x033, 0x00000007},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000008},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000009},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000010},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000011},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000012},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000013},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000014},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000015},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000016},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000017},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000018},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000019},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001B},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001C},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001D},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001E},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001F},
+ {0x03F, 0x00004344},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000020},
+ {0x033, 0x00000010},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000011},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000012},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000013},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000020},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000021},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000022},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000023},
+ {0x03F, 0x00000200},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000010},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x030, 0x00068000},
+ {0x030, 0x00070000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000004},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000006},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x00000007},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x00000008},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000009},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x0000000E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x0000000F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x00000010},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000011},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000012},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x00000013},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x00000014},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00025A58},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x00021C58},
+ {0x033, 0x00000017},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000018},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00025A58},
+ {0x033, 0x00000019},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00025A58},
+ {0x033, 0x0000001A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000001B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000001C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000001D},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00025A58},
+ {0x033, 0x0000001E},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000001F},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000020},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000021},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000022},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000023},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000024},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000025},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000026},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000027},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000028},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000029},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000002A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000002B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x0000002C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000002D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000002E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000002F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000030},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000031},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000032},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000033},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000034},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000035},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000036},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000037},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000038},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000039},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000003A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000003B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x0000003C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000003D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000003E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000003F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000800},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000031},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000023},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x0EC, 0x00000400},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000030},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000021},
+ {0x0EC, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x033, 0x00000000},
+ {0x008, 0x00060280},
+ {0x009, 0x00030400},
+ {0x0EF, 0x00000000},
+ {0x0A7, 0x00080308},
+ {0x066, 0x00006000},
+ {0x0EF, 0x00000400},
+ {0x030, 0x000001FF},
+ {0x030, 0x000081FF},
+ {0x030, 0x000101FF},
+ {0x030, 0x000181FF},
+ {0x030, 0x000201FF},
+ {0x030, 0x000281FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0xB0000000, 0x00000000},
+ {0x030, 0x000380FB},
+ {0x0EF, 0x00000000},
+ {0x06E, 0x00077A18},
+ {0x06D, 0x00000C31},
+ {0x06A, 0x000E0F8A},
+ {0x06B, 0x000018A0},
+ {0x06F, 0x000F81FC},
+ {0x05E, 0x0000001F},
+ {0x0EF, 0x00000200},
+ {0x030, 0x0003D407},
+ {0x030, 0x00035A87},
+ {0x030, 0x0002CF07},
+ {0x030, 0x00024F07},
+ {0x030, 0x0001CF07},
+ {0x030, 0x00014F07},
+ {0x030, 0x0000CF07},
+ {0x030, 0x00004F07},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00080000},
+ {0x030, 0x00008038},
+ {0x030, 0x00010038},
+ {0x030, 0x00018038},
+ {0x030, 0x00020038},
+ {0x030, 0x00028038},
+ {0x030, 0x00030038},
+ {0x030, 0x0003803C},
+ {0x030, 0x0004003C},
+ {0x030, 0x0004803C},
+ {0x030, 0x0005003C},
+ {0x030, 0x0005803C},
+ {0x030, 0x0006003C},
+ {0x030, 0x0006803C},
+ {0x030, 0x0007003C},
+ {0x0EB, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0xA0000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00001000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000024},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000028},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000002C},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000030},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000034},
+ {0x03F, 0x000001E7},
+ {0x033, 0x00000038},
+ {0x03F, 0x000002E7},
+ {0x033, 0x0000003C},
+ {0x03F, 0x000003E7},
+ {0x033, 0x00000021},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000025},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000029},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000002D},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000031},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000035},
+ {0x03F, 0x000001E6},
+ {0x033, 0x00000039},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000003D},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000022},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000026},
+ {0x03F, 0x0000005A},
+ {0x033, 0x0000002A},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000002E},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000032},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000036},
+ {0x03F, 0x000001E6},
+ {0x033, 0x0000003A},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000003E},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000060},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000064},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000068},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000006C},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000070},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000074},
+ {0x03F, 0x000001E6},
+ {0x033, 0x00000078},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000007C},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000061},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000065},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000069},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000006D},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000071},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000075},
+ {0x03F, 0x000001E6},
+ {0x033, 0x00000079},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000007D},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000062},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000066},
+ {0x03F, 0x0000005A},
+ {0x033, 0x0000006A},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000006E},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000072},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000076},
+ {0x03F, 0x000001E6},
+ {0x033, 0x0000007A},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000007E},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007B},
+ {0x03F, 0x000002E7},
+ {0x033, 0x0000007F},
+ {0x03F, 0x000003E7},
+ {0x0EE, 0x00000000},
+ {0x100EE, 0x00004000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E1},
+ {0x10030, 0x0007A4DB},
+ {0x10030, 0x0007A8A1},
+ {0x10030, 0x0007AC9B},
+ {0x10030, 0x0007B061},
+ {0x10030, 0x0007B45B},
+ {0x10030, 0x0007B821},
+ {0x10030, 0x0007BC1B},
+ {0x10030, 0x0007C015},
+ {0x10030, 0x0007C40F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E1},
+ {0x10030, 0x0007A4DB},
+ {0x10030, 0x0007A8A1},
+ {0x10030, 0x0007AC9B},
+ {0x10030, 0x0007B061},
+ {0x10030, 0x0007B45B},
+ {0x10030, 0x0007B821},
+ {0x10030, 0x0007BC1B},
+ {0x10030, 0x0007C015},
+ {0x10030, 0x0007C40F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E1},
+ {0x10030, 0x0007A4DB},
+ {0x10030, 0x0007A8A1},
+ {0x10030, 0x0007AC9B},
+ {0x10030, 0x0007B061},
+ {0x10030, 0x0007B45B},
+ {0x10030, 0x0007B821},
+ {0x10030, 0x0007BC1B},
+ {0x10030, 0x0007C015},
+ {0x10030, 0x0007C40F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10030, 0x000000FC},
+ {0x10030, 0x000004F9},
+ {0x10030, 0x000008F6},
+ {0x10030, 0x00000CF3},
+ {0x10030, 0x000010F0},
+ {0x10030, 0x000014ED},
+ {0x10030, 0x000018AC},
+ {0x10030, 0x00001CA9},
+ {0x10030, 0x00002069},
+ {0x10030, 0x00002466},
+ {0x10030, 0x00002829},
+ {0x10030, 0x00002C26},
+ {0x10030, 0x00003023},
+ {0x10030, 0x00003420},
+ {0x10030, 0x0000381D},
+ {0x10030, 0x00003C1A},
+ {0x10030, 0x00004017},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10030, 0x000780F4},
+ {0x10030, 0x000784F1},
+ {0x10030, 0x000788EE},
+ {0x10030, 0x00078CEB},
+ {0x10030, 0x000790E8},
+ {0x10030, 0x000794E5},
+ {0x10030, 0x000798E2},
+ {0x10030, 0x00079CDF},
+ {0x10030, 0x0007A0DC},
+ {0x10030, 0x0007A4D9},
+ {0x10030, 0x0007A8D6},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B0D0},
+ {0x10030, 0x0007B4CD},
+ {0x10030, 0x0007B8CA},
+ {0x10030, 0x0007BC07},
+ {0x10030, 0x0007C004},
+ {0x100EE, 0x00000000},
+ {0x0EF, 0x00002000},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000004},
+ {0x033, 0x00000009},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000F},
+ {0x03F, 0x00000002},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00040000},
+ {0x030, 0x000109B7},
+ {0x0EB, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000026},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000027},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000028},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000030},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000036},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000037},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000060},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000066},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000067},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000068},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000006E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000006F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000070},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000076},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000077},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000078},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000007E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000007F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000FF},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000120},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000126},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000127},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000128},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000012E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000012F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000130},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000136},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000137},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000160},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000166},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000167},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000168},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000016E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000016F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000170},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000176},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000177},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000178},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000017E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000017F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001FF},
+ {0x03F, 0x00000003},
+ {0x0EF, 0x00000000},
+ {0x005, 0x00000001},
+ {0x10005, 0x00000001},
+ {0x100EE, 0x00000400},
+ {0x10030, 0x00000000},
+ {0x10030, 0x00001000},
+ {0x10030, 0x00002000},
+ {0x10030, 0x00003000},
+ {0x10030, 0x00004000},
+ {0x10030, 0x00005000},
+ {0x10030, 0x00006003},
+ {0x10030, 0x00007003},
+ {0x10030, 0x00008000},
+ {0x10030, 0x00009000},
+ {0x10030, 0x0000A000},
+ {0x10030, 0x0000B000},
+ {0x10030, 0x0000C000},
+ {0x10030, 0x0000D000},
+ {0x10030, 0x0000E003},
+ {0x10030, 0x0000F003},
+ {0x10030, 0x00010000},
+ {0x10030, 0x00011000},
+ {0x10030, 0x00012000},
+ {0x10030, 0x00013000},
+ {0x10030, 0x00014000},
+ {0x10030, 0x00015000},
+ {0x10030, 0x00016003},
+ {0x10030, 0x00017003},
+ {0x10030, 0x00018000},
+ {0x10030, 0x00019000},
+ {0x10030, 0x0001A000},
+ {0x10030, 0x0001B000},
+ {0x10030, 0x0001C000},
+ {0x10030, 0x0001D000},
+ {0x10030, 0x0001E003},
+ {0x10030, 0x0001F003},
+ {0x10030, 0x00020000},
+ {0x10030, 0x00021000},
+ {0x10030, 0x00022000},
+ {0x10030, 0x00023000},
+ {0x10030, 0x00024000},
+ {0x10030, 0x00025000},
+ {0x10030, 0x00026003},
+ {0x10030, 0x00027003},
+ {0x10030, 0x00028000},
+ {0x10030, 0x00029000},
+ {0x10030, 0x0002A000},
+ {0x10030, 0x0002B000},
+ {0x10030, 0x0002C000},
+ {0x10030, 0x0002D000},
+ {0x10030, 0x0002E003},
+ {0x10030, 0x0002F003},
+ {0x10030, 0x00030000},
+ {0x10030, 0x00031000},
+ {0x10030, 0x00032000},
+ {0x10030, 0x00033000},
+ {0x10030, 0x00034000},
+ {0x10030, 0x00035000},
+ {0x10030, 0x00036003},
+ {0x10030, 0x00037003},
+ {0x10030, 0x00038000},
+ {0x10030, 0x00039000},
+ {0x10030, 0x0003A000},
+ {0x10030, 0x0003B000},
+ {0x10030, 0x0003C000},
+ {0x10030, 0x0003D000},
+ {0x10030, 0x0003E003},
+ {0x10030, 0x0003F003},
+ {0x10030, 0x00060000},
+ {0x10030, 0x00061000},
+ {0x10030, 0x00062000},
+ {0x10030, 0x00063000},
+ {0x10030, 0x00064000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x10030, 0x00067003},
+ {0x10030, 0x00068000},
+ {0x10030, 0x00069000},
+ {0x10030, 0x0006A000},
+ {0x10030, 0x0006B000},
+ {0x10030, 0x0006C000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x10030, 0x0006F003},
+ {0x10030, 0x00070000},
+ {0x10030, 0x00071000},
+ {0x10030, 0x00072000},
+ {0x10030, 0x00073000},
+ {0x10030, 0x00074000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x10030, 0x00077003},
+ {0x10030, 0x00078000},
+ {0x10030, 0x00079000},
+ {0x10030, 0x0007A000},
+ {0x10030, 0x0007B000},
+ {0x10030, 0x0007C000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x10030, 0x0007F003},
+ {0x100EE, 0x00000000},
+ {0x0FE, 0x00000031},
+};
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0020000, 0x00000001},
+ {0xF0320000, 0x00000002},
+ {0xF0330000, 0x00000003},
+ {0xF0340000, 0x00000004},
+ {0xF0350000, 0x00000005},
+ {0xF0360000, 0x00000006},
+ {0xF0010001, 0x00000007},
+ {0xF0020001, 0x00000008},
+ {0xF0320001, 0x00000009},
+ {0xF0330001, 0x0000000A},
+ {0xF0340001, 0x0000000B},
+ {0xF0350001, 0x0000000C},
+ {0xF0360001, 0x0000000D},
+ {0xF03F0001, 0x0000000E},
+ {0xF0400001, 0x0000000F},
+ {0x005, 0x00000000},
+ {0x10005, 0x00000000},
+ {0x0B9, 0x00020440},
+ {0x000, 0x00030001},
+ {0x10000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x10018, 0x00011124},
+ {0x05F, 0x00000032},
+ {0x097, 0x00043200},
+ {0x0A6, 0x00066DB7},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x00000003},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x00000002},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x033, 0x00000015},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x00000013},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x00000012},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x0EF, 0x00000000},
+ {0x000, 0x00033C01},
+ {0x10000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x0FE, 0x00000000},
+ {0x096, 0x00015200},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0xA0000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0xB0000000, 0x00000000},
+ {0x057, 0x0000D589},
+ {0x05A, 0x0007FFFF},
+ {0x043, 0x00005000},
+ {0x018, 0x00001001},
+ {0x10018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x10002, 0x0000000D},
+ {0x0EE, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x0EE, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0xA0000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0xB0000000, 0x00000000},
+ {0x0EB, 0x00000000},
+ {0x030, 0x000109B0},
+ {0x030, 0x000189B0},
+ {0x0EB, 0x00000000},
+ {0x0EE, 0x00000010},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000001},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000017},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000026},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000027},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000028},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000030},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000036},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000037},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000060},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000066},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000067},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000068},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000006E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000006F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000070},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000076},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000077},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000078},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000007E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000007F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000FF},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000120},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000126},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000127},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000128},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000012E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000012F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000130},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000136},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000137},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000160},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000166},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000167},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000168},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000016E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000016F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000170},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000176},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000177},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000178},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000017E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000017F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001FF},
+ {0x03F, 0x00000003},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000008},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000009},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000AFFF},
+ {0x033, 0x0000000C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000010},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000011},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000012},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000013},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000014},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000017},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000018},
+ {0x03F, 0x0000FBFF},
+ {0x033, 0x00000019},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000020},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000021},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000022},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000023},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000024},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000025},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000026},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000027},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000028},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000029},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000030},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000031},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000032},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000033},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000034},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000035},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000036},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000037},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000038},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000039},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000003A},
+ {0x03F, 0x0000EFFF},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000040},
+ {0x033, 0x00000000},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000001},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000002},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000003},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000004},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000005},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000006},
+ {0x03F, 0x00004324},
+ {0x033, 0x00000007},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000008},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000009},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000010},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000011},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000012},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000013},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000014},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000015},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000016},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000017},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000018},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000019},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001B},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001C},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001D},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001E},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001F},
+ {0x03F, 0x00004344},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000020},
+ {0x033, 0x00000010},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000011},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000012},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000013},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000020},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000021},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000022},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000023},
+ {0x03F, 0x00000200},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000010},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x030, 0x00068000},
+ {0x030, 0x00070000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000004},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000006},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x00000007},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000008},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000009},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000000E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x0000000F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000010},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000011},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000012},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x00000013},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000014},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00029858},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x00023958},
+ {0x033, 0x00000017},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000018},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00029858},
+ {0x033, 0x00000019},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00029858},
+ {0x033, 0x0000001A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000001B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000001C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000001D},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00029858},
+ {0x033, 0x0000001E},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000001F},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000020},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000021},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000022},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000023},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000024},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000025},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000026},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000027},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000028},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000029},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000002A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000002B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000002C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000002D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000002E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000002F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000030},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000031},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000032},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000033},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000034},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000035},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000036},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000037},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000038},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000039},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000003A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000003B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000003C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000003D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000003E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000003F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000800},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000031},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000023},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x0EC, 0x00000400},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000030},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000021},
+ {0x0EC, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x033, 0x00000000},
+ {0x008, 0x00060280},
+ {0x009, 0x00030400},
+ {0x0EF, 0x00000000},
+ {0x0A7, 0x00080308},
+ {0x066, 0x00006000},
+ {0x0EF, 0x00000400},
+ {0x030, 0x000001FF},
+ {0x030, 0x000081FF},
+ {0x030, 0x000101FF},
+ {0x030, 0x000181FF},
+ {0x030, 0x000201FF},
+ {0x030, 0x000281FF},
+ {0x030, 0x0003017F},
+ {0x030, 0x000380FB},
+ {0x0EF, 0x00000000},
+ {0x06E, 0x00077A18},
+ {0x06D, 0x00000C31},
+ {0x06A, 0x000E0F8A},
+ {0x06B, 0x000018A0},
+ {0x06F, 0x000F81FC},
+ {0x05E, 0x0000001F},
+ {0x0EF, 0x00000200},
+ {0x030, 0x0003D407},
+ {0x030, 0x00035A87},
+ {0x030, 0x0002CF07},
+ {0x030, 0x00024F07},
+ {0x030, 0x0001CF07},
+ {0x030, 0x00014F07},
+ {0x030, 0x0000CF07},
+ {0x030, 0x00004F07},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00080000},
+ {0x030, 0x00008038},
+ {0x030, 0x00010038},
+ {0x030, 0x00018038},
+ {0x030, 0x00020038},
+ {0x030, 0x00028038},
+ {0x030, 0x00030038},
+ {0x030, 0x0003803C},
+ {0x030, 0x0004003C},
+ {0x030, 0x0004803C},
+ {0x030, 0x0005003C},
+ {0x030, 0x0005803C},
+ {0x030, 0x0006003C},
+ {0x030, 0x0006803C},
+ {0x030, 0x0007003C},
+ {0x0EB, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+	{0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0xA0000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00001000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000024},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000028},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000002C},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000030},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000034},
+ {0x03F, 0x000001E7},
+ {0x033, 0x00000038},
+ {0x03F, 0x000002E7},
+ {0x033, 0x0000003C},
+ {0x03F, 0x000003E7},
+ {0x033, 0x00000021},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000025},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000029},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000002D},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000031},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000035},
+ {0x03F, 0x000001E6},
+ {0x033, 0x00000039},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000003D},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000022},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000026},
+ {0x03F, 0x0000005A},
+ {0x033, 0x0000002A},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000002E},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000032},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000036},
+ {0x03F, 0x000001E6},
+ {0x033, 0x0000003A},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000003E},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000060},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000064},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000068},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000006C},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000070},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000074},
+ {0x03F, 0x000001E6},
+ {0x033, 0x00000078},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000007C},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000061},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000065},
+ {0x03F, 0x0000005A},
+ {0x033, 0x00000069},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000006D},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000071},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000075},
+ {0x03F, 0x000001E6},
+ {0x033, 0x00000079},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000007D},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000062},
+ {0x03F, 0x00000052},
+ {0x033, 0x00000066},
+ {0x03F, 0x0000005A},
+ {0x033, 0x0000006A},
+ {0x03F, 0x0000009C},
+ {0x033, 0x0000006E},
+ {0x03F, 0x0000019C},
+ {0x033, 0x00000072},
+ {0x03F, 0x000001A4},
+ {0x033, 0x00000076},
+ {0x03F, 0x000001E6},
+ {0x033, 0x0000007A},
+ {0x03F, 0x000002E6},
+ {0x033, 0x0000007E},
+ {0x03F, 0x000003E6},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+	{0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+	{0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+	{0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+	{0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+	{0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+	{0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007B},
+ {0x03F, 0x000002E7},
+ {0x033, 0x0000007F},
+ {0x03F, 0x000003E7},
+ {0x0EE, 0x00000000},
+ {0x100EE, 0x00004000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E1},
+ {0x10030, 0x0007A4DB},
+ {0x10030, 0x0007A8A1},
+ {0x10030, 0x0007AC9B},
+ {0x10030, 0x0007B061},
+ {0x10030, 0x0007B45B},
+ {0x10030, 0x0007B821},
+ {0x10030, 0x0007BC1B},
+ {0x10030, 0x0007C015},
+ {0x10030, 0x0007C40F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E1},
+ {0x10030, 0x0007A4DB},
+ {0x10030, 0x0007A8A1},
+ {0x10030, 0x0007AC9B},
+ {0x10030, 0x0007B061},
+ {0x10030, 0x0007B45B},
+ {0x10030, 0x0007B821},
+ {0x10030, 0x0007BC1B},
+ {0x10030, 0x0007C015},
+ {0x10030, 0x0007C40F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E1},
+ {0x10030, 0x0007A4DB},
+ {0x10030, 0x0007A8A1},
+ {0x10030, 0x0007AC9B},
+ {0x10030, 0x0007B061},
+ {0x10030, 0x0007B45B},
+ {0x10030, 0x0007B821},
+ {0x10030, 0x0007BC1B},
+ {0x10030, 0x0007C015},
+ {0x10030, 0x0007C40F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10030, 0x000000FC},
+ {0x10030, 0x000004F9},
+ {0x10030, 0x000008F6},
+ {0x10030, 0x00000CF3},
+ {0x10030, 0x000010F0},
+ {0x10030, 0x000014ED},
+ {0x10030, 0x000018AC},
+ {0x10030, 0x00001CA9},
+ {0x10030, 0x00002069},
+ {0x10030, 0x00002466},
+ {0x10030, 0x00002829},
+ {0x10030, 0x00002C26},
+ {0x10030, 0x00003023},
+ {0x10030, 0x00003420},
+ {0x10030, 0x0000381D},
+ {0x10030, 0x00003C1A},
+ {0x10030, 0x00004017},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10030, 0x000780F4},
+ {0x10030, 0x000784F1},
+ {0x10030, 0x000788EE},
+ {0x10030, 0x00078CEB},
+ {0x10030, 0x000790E8},
+ {0x10030, 0x000794E5},
+ {0x10030, 0x000798E2},
+ {0x10030, 0x00079CDF},
+ {0x10030, 0x0007A0DC},
+ {0x10030, 0x0007A4D9},
+ {0x10030, 0x0007A8D6},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B0D0},
+ {0x10030, 0x0007B4CD},
+ {0x10030, 0x0007B8CA},
+ {0x10030, 0x0007BC07},
+ {0x10030, 0x0007C004},
+ {0x100EE, 0x00000000},
+ {0x0EF, 0x00002000},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000004},
+ {0x033, 0x00000009},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000F},
+ {0x03F, 0x00000002},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00040000},
+ {0x030, 0x000109B7},
+ {0x0EB, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000026},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000027},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000028},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000030},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000036},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000037},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000060},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000066},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000067},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000068},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000006E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000006F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000070},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000076},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000077},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000078},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000007E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000007F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000FF},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000120},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000126},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000127},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000128},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000012E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000012F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000130},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000136},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000137},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000160},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000166},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000167},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000168},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000016E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000016F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000170},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000176},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000177},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000178},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000017E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000017F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001FF},
+ {0x03F, 0x00000003},
+ {0x0EF, 0x00000000},
+ {0x005, 0x00000001},
+ {0x10005, 0x00000001},
+ {0x100EE, 0x00000400},
+ {0x10030, 0x00000000},
+ {0x10030, 0x00001000},
+ {0x10030, 0x00002000},
+ {0x10030, 0x00003000},
+ {0x10030, 0x00004000},
+ {0x10030, 0x00005000},
+ {0x10030, 0x00006003},
+ {0x10030, 0x00007003},
+ {0x10030, 0x00008000},
+ {0x10030, 0x00009000},
+ {0x10030, 0x0000A000},
+ {0x10030, 0x0000B000},
+ {0x10030, 0x0000C000},
+ {0x10030, 0x0000D000},
+ {0x10030, 0x0000E003},
+ {0x10030, 0x0000F003},
+ {0x10030, 0x00010000},
+ {0x10030, 0x00011000},
+ {0x10030, 0x00012000},
+ {0x10030, 0x00013000},
+ {0x10030, 0x00014000},
+ {0x10030, 0x00015000},
+ {0x10030, 0x00016003},
+ {0x10030, 0x00017003},
+ {0x10030, 0x00018000},
+ {0x10030, 0x00019000},
+ {0x10030, 0x0001A000},
+ {0x10030, 0x0001B000},
+ {0x10030, 0x0001C000},
+ {0x10030, 0x0001D000},
+ {0x10030, 0x0001E003},
+ {0x10030, 0x0001F003},
+ {0x10030, 0x00020000},
+ {0x10030, 0x00021000},
+ {0x10030, 0x00022000},
+ {0x10030, 0x00023000},
+ {0x10030, 0x00024000},
+ {0x10030, 0x00025000},
+ {0x10030, 0x00026003},
+ {0x10030, 0x00027003},
+ {0x10030, 0x00028000},
+ {0x10030, 0x00029000},
+ {0x10030, 0x0002A000},
+ {0x10030, 0x0002B000},
+ {0x10030, 0x0002C000},
+ {0x10030, 0x0002D000},
+ {0x10030, 0x0002E003},
+ {0x10030, 0x0002F003},
+ {0x10030, 0x00030000},
+ {0x10030, 0x00031000},
+ {0x10030, 0x00032000},
+ {0x10030, 0x00033000},
+ {0x10030, 0x00034000},
+ {0x10030, 0x00035000},
+ {0x10030, 0x00036003},
+ {0x10030, 0x00037003},
+ {0x10030, 0x00038000},
+ {0x10030, 0x00039000},
+ {0x10030, 0x0003A000},
+ {0x10030, 0x0003B000},
+ {0x10030, 0x0003C000},
+ {0x10030, 0x0003D000},
+ {0x10030, 0x0003E003},
+ {0x10030, 0x0003F003},
+ {0x10030, 0x00060000},
+ {0x10030, 0x00061000},
+ {0x10030, 0x00062000},
+ {0x10030, 0x00063000},
+ {0x10030, 0x00064000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x10030, 0x00067003},
+ {0x10030, 0x00068000},
+ {0x10030, 0x00069000},
+ {0x10030, 0x0006A000},
+ {0x10030, 0x0006B000},
+ {0x10030, 0x0006C000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x10030, 0x0006F003},
+ {0x10030, 0x00070000},
+ {0x10030, 0x00071000},
+ {0x10030, 0x00072000},
+ {0x10030, 0x00073000},
+ {0x10030, 0x00074000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x10030, 0x00077003},
+ {0x10030, 0x00078000},
+ {0x10030, 0x00079000},
+ {0x10030, 0x0007A000},
+ {0x10030, 0x0007B000},
+ {0x10030, 0x0007C000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x10030, 0x0007F003},
+ {0x0ED, 0x00000010},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000000A},
+ {0x0ED, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x0FE, 0x00000031},
+};
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_nctl_regs[] = {
+ {0x8008, 0x00000000},
+ {0x8000, 0x00000008},
+ {0x8004, 0xf0862966},
+ {0x800c, 0x78000000},
+ {0x8010, 0x88015000},
+ {0x8014, 0x80010100},
+ {0x8018, 0x10010100},
+ {0x801c, 0xa210bc00},
+ {0x8020, 0x000403e0},
+ {0x8024, 0x00072160},
+ {0x8028, 0x00180e00},
+ {0x8030, 0x400000c0},
+ {0x8034, 0x11000830},
+ {0x8038, 0x00000009},
+ {0x803c, 0x00000008},
+ {0x8040, 0x00000046},
+ {0x8044, 0x0010001f},
+ {0x8048, 0xf0000003},
+ {0x804c, 0x62ac6162},
+ {0x8050, 0xf2acf162},
+ {0x8054, 0x62ac6162},
+ {0x8058, 0xf2acf162},
+ {0x805c, 0x150c0b02},
+ {0x8060, 0x150c0b02},
+ {0x8064, 0x2aa00047},
+ {0x8074, 0x80000000},
+ {0x807c, 0x000000ee},
+ {0x8088, 0x80000000},
+ {0x808c, 0x00000000},
+ {0x80b0, 0x00000000},
+ {0x80d0, 0x00000000},
+ {0x80ec, 0x00000002},
+ {0x8098, 0x0000ff00},
+ {0x8070, 0x00e80000},
+ {0x80b0, 0xffe00fff},
+ {0x809c, 0x0000001f},
+ {0x80b8, 0x00001000},
+ {0x80bc, 0x0005001d},
+ {0x810c, 0x33112211},
+ {0x8110, 0x33112211},
+ {0x8114, 0x00000000},
+ {0x8120, 0x10010000},
+ {0x8124, 0x00000000},
+ {0x8128, 0x00000200},
+ {0x812c, 0x0000c000},
+ {0x8138, 0x40000000},
+ {0x813c, 0x40000000},
+ {0x8140, 0x00000000},
+ {0x8144, 0x0b040b03},
+ {0x8148, 0x0a040b04},
+ {0x814c, 0x0a040b04},
+ {0x8150, 0xe4e40000},
+ {0x8158, 0xffffffff},
+ {0x815c, 0xffffffff},
+ {0x8160, 0xffffffff},
+ {0x8164, 0xffffffff},
+ {0x8168, 0xffffffff},
+ {0x816c, 0x1fffffff},
+ {0x81cc, 0x00000000},
+ {0x81dc, 0x00000002},
+ {0x81e0, 0x00000000},
+ {0x81e4, 0x00000001},
+ {0x81a0, 0x00000000},
+ {0x81ac, 0x3fc20400},
+ {0x81b0, 0x3f914100},
+ {0x81bc, 0x0000005b},
+ {0x81c0, 0x0000005b},
+ {0x81b4, 0x01e0f078},
+ {0x81b8, 0x01e0f078},
+ {0x81f0, 0x0000f078},
+ {0x820c, 0x33112211},
+ {0x8210, 0x33112211},
+ {0x8214, 0x00000000},
+ {0x8220, 0x10010000},
+ {0x8224, 0x00000000},
+ {0x8228, 0x00000200},
+ {0x822c, 0x0000d000},
+ {0x8238, 0x40000000},
+ {0x823c, 0x40000000},
+ {0x8240, 0x00000000},
+ {0x8244, 0x0b040b03},
+ {0x8248, 0x0a040b04},
+ {0x824c, 0x0a040b04},
+ {0x8250, 0xe4e40000},
+ {0x8258, 0xffffffff},
+ {0x825c, 0xffffffff},
+ {0x8260, 0xffffffff},
+ {0x8264, 0xffffffff},
+ {0x8268, 0xffffffff},
+ {0x826c, 0x1fffffff},
+ {0x82cc, 0x00000000},
+ {0x82dc, 0x00000002},
+ {0x82e0, 0x00100000},
+ {0x82e4, 0x00000001},
+ {0x82a0, 0x00000000},
+ {0x82ac, 0x3fc20400},
+ {0x82b0, 0x3f914100},
+ {0x82bc, 0x0000005b},
+ {0x82c0, 0x0000005b},
+ {0x82b4, 0x01e0f078},
+ {0x82b8, 0x01e0f078},
+ {0x82f0, 0x0000f078},
+ {0x81d8, 0x00000001},
+ {0x82d8, 0x00000001},
+ {0x9500, 0x00000000},
+ {0x9504, 0x00000000},
+ {0x9508, 0x00000000},
+ {0x950c, 0x00000000},
+ {0x9510, 0x00000000},
+ {0x9514, 0x00000000},
+ {0x9518, 0x00000000},
+ {0x951c, 0x00000000},
+ {0x9520, 0x00000000},
+ {0x9524, 0x00000000},
+ {0x9528, 0x00000000},
+ {0x952c, 0x00000000},
+ {0x9530, 0x00000000},
+ {0x9534, 0x00000000},
+ {0x9538, 0x00000000},
+ {0x953c, 0x00000000},
+ {0x9540, 0x04000000},
+ {0x9544, 0x00000000},
+ {0x9548, 0x00000000},
+ {0x954c, 0x00000000},
+ {0x9550, 0x00000000},
+ {0x9554, 0x00000000},
+ {0x9558, 0x00000000},
+ {0x955c, 0x00000000},
+ {0x9560, 0x00000000},
+ {0x9564, 0x00000000},
+ {0x9568, 0x00000000},
+ {0x956c, 0x00000000},
+ {0x9570, 0x00000000},
+ {0x9574, 0x00000000},
+ {0x9578, 0x00000000},
+ {0x957c, 0x00000000},
+ {0x9580, 0x00000000},
+ {0x9584, 0x04000000},
+ {0x9588, 0x00000000},
+ {0x958c, 0x00000000},
+ {0x9590, 0x00000000},
+ {0x9594, 0x00000000},
+ {0x9598, 0x00000000},
+ {0x959c, 0x00000000},
+ {0x95a0, 0x00000000},
+ {0x95a4, 0x00000000},
+ {0x95a8, 0x00000000},
+ {0x95ac, 0x00000000},
+ {0x95b0, 0x00000000},
+ {0x95b4, 0x00000000},
+ {0x95b8, 0x00000000},
+ {0x95bc, 0x00000000},
+ {0x95c0, 0x00000000},
+ {0x95c4, 0x00000000},
+ {0x95c8, 0x04000000},
+ {0x95cc, 0x00000000},
+ {0x95d0, 0x00000000},
+ {0x95d4, 0x00000000},
+ {0x95d8, 0x00000000},
+ {0x95dc, 0x00000000},
+ {0x95e0, 0x00000000},
+ {0x95e4, 0x00000000},
+ {0x95e8, 0x00000000},
+ {0x95ec, 0x00000000},
+ {0x95f0, 0x00000000},
+ {0x95f4, 0x00000000},
+ {0x95f8, 0x00000000},
+ {0x95fc, 0x00000000},
+ {0x9600, 0x00000000},
+ {0x9604, 0x00000000},
+ {0x9608, 0x00000000},
+ {0x960c, 0x04000000},
+ {0x9610, 0x00000000},
+ {0x9614, 0x00000000},
+ {0x9618, 0x00000000},
+ {0x961c, 0x00000000},
+ {0x9620, 0x00000000},
+ {0x9624, 0x00000000},
+ {0x9628, 0x00000000},
+ {0x962c, 0x00000000},
+ {0x9630, 0x00000000},
+ {0x9634, 0x00000000},
+ {0x9638, 0x00000000},
+ {0x963c, 0x00000000},
+ {0x9640, 0x00000000},
+ {0x9644, 0x00000000},
+ {0x9648, 0x00000000},
+ {0x964c, 0x00000000},
+ {0x9650, 0x04000000},
+ {0x9654, 0x00000000},
+ {0x9658, 0x00000000},
+ {0x965c, 0x00000000},
+ {0x9660, 0x00000000},
+ {0x9664, 0x00000000},
+ {0x9668, 0x00000000},
+ {0x966c, 0x00000000},
+ {0x9670, 0x00000000},
+ {0x9674, 0x00000000},
+ {0x9678, 0x00000000},
+ {0x967c, 0x00000000},
+ {0x9680, 0x00000000},
+ {0x9684, 0x00000000},
+ {0x9688, 0x00000000},
+ {0x968c, 0x00000000},
+ {0x9690, 0x00000000},
+ {0x9694, 0x04000000},
+ {0x9698, 0x00000000},
+ {0x969c, 0x00000000},
+ {0x96a0, 0x00000000},
+ {0x96a4, 0x00000000},
+ {0x96a8, 0x00000000},
+ {0x96ac, 0x00000000},
+ {0x96b0, 0x00000000},
+ {0x96b4, 0x00000000},
+ {0x96b8, 0x00000000},
+ {0x96bc, 0x00000000},
+ {0x96c0, 0x00000000},
+ {0x96c4, 0x00000000},
+ {0x96c8, 0x00000000},
+ {0x96cc, 0x00000000},
+ {0x96d0, 0x00000000},
+ {0x96d4, 0x00000000},
+ {0x96d8, 0x04000000},
+ {0x96dc, 0x00000000},
+ {0x96e0, 0x00000000},
+ {0x96e4, 0x00000000},
+ {0x96e8, 0x00000000},
+ {0x96ec, 0x00000000},
+ {0x96f0, 0x00000000},
+ {0x96f4, 0x00000000},
+ {0x96f8, 0x00000000},
+ {0x96fc, 0x00000000},
+ {0x9700, 0x00000000},
+ {0x9704, 0x00000000},
+ {0x9708, 0x00000000},
+ {0x970c, 0x00000000},
+ {0x9710, 0x00000000},
+ {0x9714, 0x00000000},
+ {0x9718, 0x00000000},
+ {0x971c, 0x04000000},
+ {0x9720, 0x00000000},
+ {0x9724, 0x00000000},
+ {0x9728, 0x00000000},
+ {0x972c, 0x00000000},
+ {0x9730, 0x00000000},
+ {0x9734, 0x00000000},
+ {0x9738, 0x00000000},
+ {0x973c, 0x00000000},
+ {0x9740, 0x00000000},
+ {0x9744, 0x00000000},
+ {0x9748, 0x00000000},
+ {0x974c, 0x00000000},
+ {0x9750, 0x00000000},
+ {0x9754, 0x00000000},
+ {0x9758, 0x00000000},
+ {0x975c, 0x00000000},
+ {0x9760, 0x04000000},
+ {0x9764, 0x00000000},
+ {0x9768, 0x00000000},
+ {0x976c, 0x00000000},
+ {0x9770, 0x00000000},
+ {0x9774, 0x00000000},
+ {0x9778, 0x00000000},
+ {0x977c, 0x00000000},
+ {0x9780, 0x00000000},
+ {0x9784, 0x00000000},
+ {0x9788, 0x00000000},
+ {0x978c, 0x00000000},
+ {0x9790, 0x00000000},
+ {0x9794, 0x00000000},
+ {0x9798, 0x00000000},
+ {0x979c, 0x00000000},
+ {0x97a0, 0x00000000},
+ {0x97a4, 0x04000000},
+ {0x97a8, 0x00000000},
+ {0x97ac, 0x00000000},
+ {0x97b0, 0x00000000},
+ {0x97b4, 0x00000000},
+ {0x97b8, 0x00000000},
+ {0x97bc, 0x00000000},
+ {0x97c0, 0x00000000},
+ {0x97c4, 0x00000000},
+ {0x97c8, 0x00000000},
+ {0x97cc, 0x00000000},
+ {0x97d0, 0x00000000},
+ {0x97d4, 0x00000000},
+ {0x97d8, 0x00000000},
+ {0x97dc, 0x00000000},
+ {0x97e0, 0x00000000},
+ {0x97e4, 0x00000000},
+ {0x97e8, 0x04000000},
+ {0x97ec, 0x00000000},
+ {0x97f0, 0x00000000},
+ {0x97f4, 0x00000000},
+ {0x97f8, 0x00000000},
+ {0x97fc, 0x00000000},
+ {0x9800, 0x00000000},
+ {0x9804, 0x00000000},
+ {0x9808, 0x00000000},
+ {0x980c, 0x00000000},
+ {0x9810, 0x00000000},
+ {0x9814, 0x00000000},
+ {0x9818, 0x00000000},
+ {0x981c, 0x00000000},
+ {0x9820, 0x00000000},
+ {0x9824, 0x00000000},
+ {0x9828, 0x00000000},
+ {0x982c, 0x04000000},
+ {0x9830, 0x00000000},
+ {0x9834, 0x00000000},
+ {0x9838, 0x00000000},
+ {0x983c, 0x00000000},
+ {0x9840, 0x00000000},
+ {0x9844, 0x00000000},
+ {0x9848, 0x00000000},
+ {0x984c, 0x00000000},
+ {0x9850, 0x00000000},
+ {0x9854, 0x00000000},
+ {0x9858, 0x00000000},
+ {0x985c, 0x00000000},
+ {0x9860, 0x00000000},
+ {0x9864, 0x00000000},
+ {0x9868, 0x00000000},
+ {0x986c, 0x00000000},
+ {0x9870, 0x04000000},
+ {0x9874, 0x00000000},
+ {0x9878, 0x00000000},
+ {0x987c, 0x00000000},
+ {0x9880, 0x00000000},
+ {0x9884, 0x00000000},
+ {0x9888, 0x00000000},
+ {0x988c, 0x00000000},
+ {0x9890, 0x00000000},
+ {0x9894, 0x00000000},
+ {0x9898, 0x00000000},
+ {0x989c, 0x00000000},
+ {0x98a0, 0x00000000},
+ {0x98a4, 0x00000000},
+ {0x98a8, 0x00000000},
+ {0x98ac, 0x00000000},
+ {0x98b0, 0x00000000},
+ {0x98b4, 0x04000000},
+ {0x98b8, 0x00000000},
+ {0x98bc, 0x00000000},
+ {0x98c0, 0x00000000},
+ {0x98c4, 0x00000000},
+ {0x98c8, 0x00000000},
+ {0x98cc, 0x00000000},
+ {0x98d0, 0x00000000},
+ {0x98d4, 0x00000000},
+ {0x98d8, 0x00000000},
+ {0x98dc, 0x00000000},
+ {0x98e0, 0x00000000},
+ {0x98e4, 0x00000000},
+ {0x98e8, 0x00000000},
+ {0x98ec, 0x00000000},
+ {0x98f0, 0x00000000},
+ {0x98f4, 0x00000000},
+ {0x98f8, 0x04000000},
+ {0x98fc, 0x00000000},
+ {0x9900, 0x00000000},
+ {0x9904, 0x00000000},
+ {0x9908, 0x00000000},
+ {0x990c, 0x00000000},
+ {0x9910, 0x00000000},
+ {0x9914, 0x00000000},
+ {0x9918, 0x00000000},
+ {0x991c, 0x00000000},
+ {0x9920, 0x00000000},
+ {0x9924, 0x00000000},
+ {0x9928, 0x00000000},
+ {0x992c, 0x00000000},
+ {0x9930, 0x00000000},
+ {0x9934, 0x00000000},
+ {0x9938, 0x00000000},
+ {0x993c, 0x04000000},
+ {0x9940, 0x00000000},
+ {0x9944, 0x00000000},
+ {0x9948, 0x00000000},
+ {0x994c, 0x00000000},
+ {0x9950, 0x00000000},
+ {0x9954, 0x00000000},
+ {0x9958, 0x00000000},
+ {0x995c, 0x00000000},
+ {0x9960, 0x00000000},
+ {0x9964, 0x00000000},
+ {0x9968, 0x00000000},
+ {0x996c, 0x00000000},
+ {0x9970, 0x00000000},
+ {0x9974, 0x00000000},
+ {0x9978, 0x00000000},
+ {0x997c, 0x00000000},
+ {0x9980, 0x04000000},
+ {0x9984, 0x00000000},
+ {0x9988, 0x00000000},
+ {0x998c, 0x00000000},
+ {0x9990, 0x00000000},
+ {0x9994, 0x00000000},
+ {0x9998, 0x00000000},
+ {0x999c, 0x00000000},
+ {0x99a0, 0x00000000},
+ {0x99a4, 0x00000000},
+ {0x99a8, 0x00000000},
+ {0x99ac, 0x00000000},
+ {0x99b0, 0x00000000},
+ {0x99b4, 0x00000000},
+ {0x99b8, 0x00000000},
+ {0x99bc, 0x00000000},
+ {0x99c0, 0x00000000},
+ {0x99c4, 0x04000000},
+ {0x99c8, 0x00000000},
+ {0x99cc, 0x00000000},
+ {0x99d0, 0x00000000},
+ {0x99d4, 0x00000000},
+ {0x99d8, 0x00000000},
+ {0x99dc, 0x00000000},
+ {0x99e0, 0x00000000},
+ {0x99e4, 0x00000000},
+ {0x99e8, 0x00000000},
+ {0x99ec, 0x00000000},
+ {0x99f0, 0x00000000},
+ {0x99f4, 0x00000000},
+ {0x99f8, 0x00000000},
+ {0x99fc, 0x00000000},
+ {0x9a00, 0x00000000},
+ {0x9a04, 0x00000000},
+ {0x9a08, 0x04000000},
+ {0x9a0c, 0x00000000},
+ {0x9a10, 0x00000000},
+ {0x9a14, 0x00000000},
+ {0x9a18, 0x00000000},
+ {0x9a1c, 0x00000000},
+ {0x9a20, 0x00000000},
+ {0x9a24, 0x00000000},
+ {0x9a28, 0x00000000},
+ {0x9a2c, 0x00000000},
+ {0x9a30, 0x00000000},
+ {0x9a34, 0x00000000},
+ {0x9a38, 0x00000000},
+ {0x9a3c, 0x00000000},
+ {0x9a40, 0x00000000},
+ {0x9a44, 0x00000000},
+ {0x9a48, 0x00000000},
+ {0x9a4c, 0x04000000},
+ {0x9a50, 0x00000000},
+ {0x9a54, 0x00000000},
+ {0x9a58, 0x00000000},
+ {0x9a5c, 0x00000000},
+ {0x9a60, 0x00000000},
+ {0x9a64, 0x00000000},
+ {0x9a68, 0x00000000},
+ {0x9a6c, 0x00000000},
+ {0x9a70, 0x00000000},
+ {0x9a74, 0x00000000},
+ {0x9a78, 0x00000000},
+ {0x9a7c, 0x00000000},
+ {0x9a80, 0x00000000},
+ {0x9a84, 0x00000000},
+ {0x9a88, 0x00000000},
+ {0x9a8c, 0x00000000},
+ {0x9a90, 0x04000000},
+ {0x9a94, 0x00000000},
+ {0x9a98, 0x00000000},
+ {0x9a9c, 0x00000000},
+ {0x9aa0, 0x00000000},
+ {0x9aa4, 0x00000000},
+ {0x9aa8, 0x00000000},
+ {0x9aac, 0x00000000},
+ {0x9ab0, 0x00000000},
+ {0x9ab4, 0x00000000},
+ {0x9ab8, 0x00000000},
+ {0x9abc, 0x00000000},
+ {0x9ac0, 0x00000000},
+ {0x9ac4, 0x00000000},
+ {0x9ac8, 0x00000000},
+ {0x9acc, 0x00000000},
+ {0x9ad0, 0x00000000},
+ {0x9ad4, 0x04000000},
+ {0x9ad8, 0x00000000},
+ {0x9adc, 0x00000000},
+ {0x9ae0, 0x00000000},
+ {0x9ae4, 0x00000000},
+ {0x9ae8, 0x00000000},
+ {0x9aec, 0x00000000},
+ {0x9af0, 0x00000000},
+ {0x9af4, 0x00000000},
+ {0x9af8, 0x00000000},
+ {0x9afc, 0x00000000},
+ {0x9b00, 0x00000000},
+ {0x9b04, 0x00000000},
+ {0x9b08, 0x00000000},
+ {0x9b0c, 0x00000000},
+ {0x9b10, 0x00000000},
+ {0x9b14, 0x00000000},
+ {0x9b18, 0x04000000},
+ {0x9b1c, 0x00000000},
+ {0x9b20, 0x00000000},
+ {0x9b24, 0x00000000},
+ {0x9b28, 0x00000000},
+ {0x9b2c, 0x00000000},
+ {0x9b30, 0x00000000},
+ {0x9b34, 0x00000000},
+ {0x9b38, 0x00000000},
+ {0x9b3c, 0x00000000},
+ {0x9b40, 0x00000000},
+ {0x9b44, 0x00000000},
+ {0x9b48, 0x00000000},
+ {0x9b4c, 0x00000000},
+ {0x9b50, 0x00000000},
+ {0x9b54, 0x00000000},
+ {0x9b58, 0x00000000},
+ {0x9b5c, 0x04000000},
+ {0x9d00, 0x00000000},
+ {0x9d04, 0x00000000},
+ {0x9d08, 0x00000000},
+ {0x9d0c, 0x00000000},
+ {0x9d10, 0x00000000},
+ {0x9d14, 0x00000000},
+ {0x9d18, 0x00000000},
+ {0x9d1c, 0x00000000},
+ {0x9d20, 0x00000000},
+ {0x9d24, 0x00000000},
+ {0x9d28, 0x00000000},
+ {0x9d2c, 0x00000000},
+ {0x9d30, 0x00000000},
+ {0x9d34, 0x00000000},
+ {0x9d38, 0x00000000},
+ {0x9d3c, 0x00000000},
+ {0x9d40, 0x04000000},
+ {0x9d44, 0x00000000},
+ {0x9d48, 0x00000000},
+ {0x9d4c, 0x00000000},
+ {0x9d50, 0x00000000},
+ {0x9d54, 0x00000000},
+ {0x9d58, 0x00000000},
+ {0x9d5c, 0x00000000},
+ {0x9d60, 0x00000000},
+ {0x9d64, 0x00000000},
+ {0x9d68, 0x00000000},
+ {0x9d6c, 0x00000000},
+ {0x9d70, 0x00000000},
+ {0x9d74, 0x00000000},
+ {0x9d78, 0x00000000},
+ {0x9d7c, 0x00000000},
+ {0x9d80, 0x00000000},
+ {0x9d84, 0x04000000},
+ {0x9d88, 0x00000000},
+ {0x9d8c, 0x00000000},
+ {0x9d90, 0x00000000},
+ {0x9d94, 0x00000000},
+ {0x9d98, 0x00000000},
+ {0x9d9c, 0x00000000},
+ {0x9da0, 0x00000000},
+ {0x9da4, 0x00000000},
+ {0x9da8, 0x00000000},
+ {0x9dac, 0x00000000},
+ {0x9db0, 0x00000000},
+ {0x9db4, 0x00000000},
+ {0x9db8, 0x00000000},
+ {0x9dbc, 0x00000000},
+ {0x9dc0, 0x00000000},
+ {0x9dc4, 0x00000000},
+ {0x9dc8, 0x04000000},
+ {0x9dcc, 0x00000000},
+ {0x9dd0, 0x00000000},
+ {0x9dd4, 0x00000000},
+ {0x9dd8, 0x00000000},
+ {0x9ddc, 0x00000000},
+ {0x9de0, 0x00000000},
+ {0x9de4, 0x00000000},
+ {0x9de8, 0x00000000},
+ {0x9dec, 0x00000000},
+ {0x9df0, 0x00000000},
+ {0x9df4, 0x00000000},
+ {0x9df8, 0x00000000},
+ {0x9dfc, 0x00000000},
+ {0x9e00, 0x00000000},
+ {0x9e04, 0x00000000},
+ {0x9e08, 0x00000000},
+ {0x9e0c, 0x04000000},
+ {0x9e10, 0x00000000},
+ {0x9e14, 0x00000000},
+ {0x9e18, 0x00000000},
+ {0x9e1c, 0x00000000},
+ {0x9e20, 0x00000000},
+ {0x9e24, 0x00000000},
+ {0x9e28, 0x00000000},
+ {0x9e2c, 0x00000000},
+ {0x9e30, 0x00000000},
+ {0x9e34, 0x00000000},
+ {0x9e38, 0x00000000},
+ {0x9e3c, 0x00000000},
+ {0x9e40, 0x00000000},
+ {0x9e44, 0x00000000},
+ {0x9e48, 0x00000000},
+ {0x9e4c, 0x00000000},
+ {0x9e50, 0x04000000},
+ {0x9e54, 0x00000000},
+ {0x9e58, 0x00000000},
+ {0x9e5c, 0x00000000},
+ {0x9e60, 0x00000000},
+ {0x9e64, 0x00000000},
+ {0x9e68, 0x00000000},
+ {0x9e6c, 0x00000000},
+ {0x9e70, 0x00000000},
+ {0x9e74, 0x00000000},
+ {0x9e78, 0x00000000},
+ {0x9e7c, 0x00000000},
+ {0x9e80, 0x00000000},
+ {0x9e84, 0x00000000},
+ {0x9e88, 0x00000000},
+ {0x9e8c, 0x00000000},
+ {0x9e90, 0x00000000},
+ {0x9e94, 0x04000000},
+ {0x9e98, 0x00000000},
+ {0x9e9c, 0x00000000},
+ {0x9ea0, 0x00000000},
+ {0x9ea4, 0x00000000},
+ {0x9ea8, 0x00000000},
+ {0x9eac, 0x00000000},
+ {0x9eb0, 0x00000000},
+ {0x9eb4, 0x00000000},
+ {0x9eb8, 0x00000000},
+ {0x9ebc, 0x00000000},
+ {0x9ec0, 0x00000000},
+ {0x9ec4, 0x00000000},
+ {0x9ec8, 0x00000000},
+ {0x9ecc, 0x00000000},
+ {0x9ed0, 0x00000000},
+ {0x9ed4, 0x00000000},
+ {0x9ed8, 0x04000000},
+ {0x9edc, 0x00000000},
+ {0x9ee0, 0x00000000},
+ {0x9ee4, 0x00000000},
+ {0x9ee8, 0x00000000},
+ {0x9eec, 0x00000000},
+ {0x9ef0, 0x00000000},
+ {0x9ef4, 0x00000000},
+ {0x9ef8, 0x00000000},
+ {0x9efc, 0x00000000},
+ {0x9f00, 0x00000000},
+ {0x9f04, 0x00000000},
+ {0x9f08, 0x00000000},
+ {0x9f0c, 0x00000000},
+ {0x9f10, 0x00000000},
+ {0x9f14, 0x00000000},
+ {0x9f18, 0x00000000},
+ {0x9f1c, 0x04000000},
+ {0x9f20, 0x00000000},
+ {0x9f24, 0x00000000},
+ {0x9f28, 0x00000000},
+ {0x9f2c, 0x00000000},
+ {0x9f30, 0x00000000},
+ {0x9f34, 0x00000000},
+ {0x9f38, 0x00000000},
+ {0x9f3c, 0x00000000},
+ {0x9f40, 0x00000000},
+ {0x9f44, 0x00000000},
+ {0x9f48, 0x00000000},
+ {0x9f4c, 0x00000000},
+ {0x9f50, 0x00000000},
+ {0x9f54, 0x00000000},
+ {0x9f58, 0x00000000},
+ {0x9f5c, 0x00000000},
+ {0x9f60, 0x04000000},
+ {0x9f64, 0x00000000},
+ {0x9f68, 0x00000000},
+ {0x9f6c, 0x00000000},
+ {0x9f70, 0x00000000},
+ {0x9f74, 0x00000000},
+ {0x9f78, 0x00000000},
+ {0x9f7c, 0x00000000},
+ {0x9f80, 0x00000000},
+ {0x9f84, 0x00000000},
+ {0x9f88, 0x00000000},
+ {0x9f8c, 0x00000000},
+ {0x9f90, 0x00000000},
+ {0x9f94, 0x00000000},
+ {0x9f98, 0x00000000},
+ {0x9f9c, 0x00000000},
+ {0x9fa0, 0x00000000},
+ {0x9fa4, 0x04000000},
+ {0x9fa8, 0x00000000},
+ {0x9fac, 0x00000000},
+ {0x9fb0, 0x00000000},
+ {0x9fb4, 0x00000000},
+ {0x9fb8, 0x00000000},
+ {0x9fbc, 0x00000000},
+ {0x9fc0, 0x00000000},
+ {0x9fc4, 0x00000000},
+ {0x9fc8, 0x00000000},
+ {0x9fcc, 0x00000000},
+ {0x9fd0, 0x00000000},
+ {0x9fd4, 0x00000000},
+ {0x9fd8, 0x00000000},
+ {0x9fdc, 0x00000000},
+ {0x9fe0, 0x00000000},
+ {0x9fe4, 0x00000000},
+ {0x9fe8, 0x04000000},
+ {0x9fec, 0x00000000},
+ {0x9ff0, 0x00000000},
+ {0x9ff4, 0x00000000},
+ {0x9ff8, 0x00000000},
+ {0x9ffc, 0x00000000},
+ {0xa000, 0x00000000},
+ {0xa004, 0x00000000},
+ {0xa008, 0x00000000},
+ {0xa00c, 0x00000000},
+ {0xa010, 0x00000000},
+ {0xa014, 0x00000000},
+ {0xa018, 0x00000000},
+ {0xa01c, 0x00000000},
+ {0xa020, 0x00000000},
+ {0xa024, 0x00000000},
+ {0xa028, 0x00000000},
+ {0xa02c, 0x04000000},
+ {0xa030, 0x00000000},
+ {0xa034, 0x00000000},
+ {0xa038, 0x00000000},
+ {0xa03c, 0x00000000},
+ {0xa040, 0x00000000},
+ {0xa044, 0x00000000},
+ {0xa048, 0x00000000},
+ {0xa04c, 0x00000000},
+ {0xa050, 0x00000000},
+ {0xa054, 0x00000000},
+ {0xa058, 0x00000000},
+ {0xa05c, 0x00000000},
+ {0xa060, 0x00000000},
+ {0xa064, 0x00000000},
+ {0xa068, 0x00000000},
+ {0xa06c, 0x00000000},
+ {0xa070, 0x04000000},
+ {0xa074, 0x00000000},
+ {0xa078, 0x00000000},
+ {0xa07c, 0x00000000},
+ {0xa080, 0x00000000},
+ {0xa084, 0x00000000},
+ {0xa088, 0x00000000},
+ {0xa08c, 0x00000000},
+ {0xa090, 0x00000000},
+ {0xa094, 0x00000000},
+ {0xa098, 0x00000000},
+ {0xa09c, 0x00000000},
+ {0xa0a0, 0x00000000},
+ {0xa0a4, 0x00000000},
+ {0xa0a8, 0x00000000},
+ {0xa0ac, 0x00000000},
+ {0xa0b0, 0x00000000},
+ {0xa0b4, 0x04000000},
+ {0xa0b8, 0x00000000},
+ {0xa0bc, 0x00000000},
+ {0xa0c0, 0x00000000},
+ {0xa0c4, 0x00000000},
+ {0xa0c8, 0x00000000},
+ {0xa0cc, 0x00000000},
+ {0xa0d0, 0x00000000},
+ {0xa0d4, 0x00000000},
+ {0xa0d8, 0x00000000},
+ {0xa0dc, 0x00000000},
+ {0xa0e0, 0x00000000},
+ {0xa0e4, 0x00000000},
+ {0xa0e8, 0x00000000},
+ {0xa0ec, 0x00000000},
+ {0xa0f0, 0x00000000},
+ {0xa0f4, 0x00000000},
+ {0xa0f8, 0x04000000},
+ {0xa0fc, 0x00000000},
+ {0xa100, 0x00000000},
+ {0xa104, 0x00000000},
+ {0xa108, 0x00000000},
+ {0xa10c, 0x00000000},
+ {0xa110, 0x00000000},
+ {0xa114, 0x00000000},
+ {0xa118, 0x00000000},
+ {0xa11c, 0x00000000},
+ {0xa120, 0x00000000},
+ {0xa124, 0x00000000},
+ {0xa128, 0x00000000},
+ {0xa12c, 0x00000000},
+ {0xa130, 0x00000000},
+ {0xa134, 0x00000000},
+ {0xa138, 0x00000000},
+ {0xa13c, 0x04000000},
+ {0xa140, 0x00000000},
+ {0xa144, 0x00000000},
+ {0xa148, 0x00000000},
+ {0xa14c, 0x00000000},
+ {0xa150, 0x00000000},
+ {0xa154, 0x00000000},
+ {0xa158, 0x00000000},
+ {0xa15c, 0x00000000},
+ {0xa160, 0x00000000},
+ {0xa164, 0x00000000},
+ {0xa168, 0x00000000},
+ {0xa16c, 0x00000000},
+ {0xa170, 0x00000000},
+ {0xa174, 0x00000000},
+ {0xa178, 0x00000000},
+ {0xa17c, 0x00000000},
+ {0xa180, 0x04000000},
+ {0xa184, 0x00000000},
+ {0xa188, 0x00000000},
+ {0xa18c, 0x00000000},
+ {0xa190, 0x00000000},
+ {0xa194, 0x00000000},
+ {0xa198, 0x00000000},
+ {0xa19c, 0x00000000},
+ {0xa1a0, 0x00000000},
+ {0xa1a4, 0x00000000},
+ {0xa1a8, 0x00000000},
+ {0xa1ac, 0x00000000},
+ {0xa1b0, 0x00000000},
+ {0xa1b4, 0x00000000},
+ {0xa1b8, 0x00000000},
+ {0xa1bc, 0x00000000},
+ {0xa1c0, 0x00000000},
+ {0xa1c4, 0x04000000},
+ {0xa1c8, 0x00000000},
+ {0xa1cc, 0x00000000},
+ {0xa1d0, 0x00000000},
+ {0xa1d4, 0x00000000},
+ {0xa1d8, 0x00000000},
+ {0xa1dc, 0x00000000},
+ {0xa1e0, 0x00000000},
+ {0xa1e4, 0x00000000},
+ {0xa1e8, 0x00000000},
+ {0xa1ec, 0x00000000},
+ {0xa1f0, 0x00000000},
+ {0xa1f4, 0x00000000},
+ {0xa1f8, 0x00000000},
+ {0xa1fc, 0x00000000},
+ {0xa200, 0x00000000},
+ {0xa204, 0x00000000},
+ {0xa208, 0x04000000},
+ {0xa20c, 0x00000000},
+ {0xa210, 0x00000000},
+ {0xa214, 0x00000000},
+ {0xa218, 0x00000000},
+ {0xa21c, 0x00000000},
+ {0xa220, 0x00000000},
+ {0xa224, 0x00000000},
+ {0xa228, 0x00000000},
+ {0xa22c, 0x00000000},
+ {0xa230, 0x00000000},
+ {0xa234, 0x00000000},
+ {0xa238, 0x00000000},
+ {0xa23c, 0x00000000},
+ {0xa240, 0x00000000},
+ {0xa244, 0x00000000},
+ {0xa248, 0x00000000},
+ {0xa24c, 0x04000000},
+ {0xa250, 0x00000000},
+ {0xa254, 0x00000000},
+ {0xa258, 0x00000000},
+ {0xa25c, 0x00000000},
+ {0xa260, 0x00000000},
+ {0xa264, 0x00000000},
+ {0xa268, 0x00000000},
+ {0xa26c, 0x00000000},
+ {0xa270, 0x00000000},
+ {0xa274, 0x00000000},
+ {0xa278, 0x00000000},
+ {0xa27c, 0x00000000},
+ {0xa280, 0x00000000},
+ {0xa284, 0x00000000},
+ {0xa288, 0x00000000},
+ {0xa28c, 0x00000000},
+ {0xa290, 0x04000000},
+ {0xa294, 0x00000000},
+ {0xa298, 0x00000000},
+ {0xa29c, 0x00000000},
+ {0xa2a0, 0x00000000},
+ {0xa2a4, 0x00000000},
+ {0xa2a8, 0x00000000},
+ {0xa2ac, 0x00000000},
+ {0xa2b0, 0x00000000},
+ {0xa2b4, 0x00000000},
+ {0xa2b8, 0x00000000},
+ {0xa2bc, 0x00000000},
+ {0xa2c0, 0x00000000},
+ {0xa2c4, 0x00000000},
+ {0xa2c8, 0x00000000},
+ {0xa2cc, 0x00000000},
+ {0xa2d0, 0x00000000},
+ {0xa2d4, 0x04000000},
+ {0xa2d8, 0x00000000},
+ {0xa2dc, 0x00000000},
+ {0xa2e0, 0x00000000},
+ {0xa2e4, 0x00000000},
+ {0xa2e8, 0x00000000},
+ {0xa2ec, 0x00000000},
+ {0xa2f0, 0x00000000},
+ {0xa2f4, 0x00000000},
+ {0xa2f8, 0x00000000},
+ {0xa2fc, 0x00000000},
+ {0xa300, 0x00000000},
+ {0xa304, 0x00000000},
+ {0xa308, 0x00000000},
+ {0xa30c, 0x00000000},
+ {0xa310, 0x00000000},
+ {0xa314, 0x00000000},
+ {0xa318, 0x04000000},
+ {0xa31c, 0x00000000},
+ {0xa320, 0x00000000},
+ {0xa324, 0x00000000},
+ {0xa328, 0x00000000},
+ {0xa32c, 0x00000000},
+ {0xa330, 0x00000000},
+ {0xa334, 0x00000000},
+ {0xa338, 0x00000000},
+ {0xa33c, 0x00000000},
+ {0xa340, 0x00000000},
+ {0xa344, 0x00000000},
+ {0xa348, 0x00000000},
+ {0xa34c, 0x00000000},
+ {0xa350, 0x00000000},
+ {0xa354, 0x00000000},
+ {0xa358, 0x00000000},
+ {0xa35c, 0x04000000},
+ {0x81d8, 0x00000000},
+ {0x82d8, 0x00000000},
+ {0xb104, 0x2b251f19},
+ {0xb108, 0x433d3731},
+ {0xb10c, 0x5b554f49},
+ {0xb110, 0x736d6761},
+ {0xb114, 0x7f7f7f79},
+ {0xb118, 0x120f7f7f},
+ {0xb11c, 0x1e1b1815},
+ {0xb120, 0x2a272421},
+ {0xb124, 0x3633302d},
+ {0xb128, 0x3f3f3c39},
+ {0xb12c, 0x3f3f3f3f},
+ {0x8088, 0x00000110},
+ {0x8000, 0x00000008},
+ {0x8080, 0x00000005},
+ {0x8500, 0x80000008},
+ {0x8504, 0x43000004},
+ {0x8508, 0x4b044a00},
+ {0x850c, 0x40098604},
+ {0x8510, 0x0004e024},
+ {0x8514, 0x87044b05},
+ {0x8518, 0xe024400b},
+ {0x851c, 0x4b000004},
+ {0x8520, 0x21e07410},
+ {0x8524, 0x16580000},
+ {0x8528, 0x00047430},
+ {0x852c, 0x00074380},
+ {0x8530, 0x00044c00},
+ {0x8534, 0x00074300},
+ {0x8538, 0x00045603},
+ {0x853c, 0x42fe5700},
+ {0x8540, 0x42004000},
+ {0x8544, 0x30005055},
+ {0x8548, 0xa512b41c},
+ {0x854c, 0xf02fe66f},
+ {0x8550, 0xf22ff12f},
+ {0x8554, 0xf42ff32f},
+ {0x8558, 0xf62ff52f},
+ {0x855c, 0xf82ff72f},
+ {0x8560, 0xfa2ff92f},
+ {0x8564, 0xfc2ffb2f},
+ {0x8568, 0xfe2ffd2f},
+ {0x856c, 0xe66fff2f},
+ {0x8570, 0xf12ef02e},
+ {0x8574, 0xf32ef22e},
+ {0x8578, 0xf52ef42e},
+ {0x857c, 0xff2ef62e},
+ {0x8580, 0xa511000b},
+ {0x8584, 0xf12cf02c},
+ {0x8588, 0xf32cf22c},
+ {0x858c, 0xf52cf42c},
+ {0x8590, 0xf72cf62c},
+ {0x8594, 0xf92cf82c},
+ {0x8598, 0xfb2cfa2c},
+ {0x859c, 0xfd2cfc2c},
+ {0x85a0, 0xff2cfe2c},
+ {0x85a4, 0xf12cf02c},
+ {0x85a8, 0x0001f22c},
+ {0x85ac, 0x30b330b3},
+ {0x85b0, 0x310c3125},
+ {0x85b4, 0x31253161},
+ {0x85b8, 0x3081316f},
+ {0x85bc, 0x317f3172},
+ {0x85c0, 0x3192318c},
+ {0x85c4, 0x32b832a6},
+ {0x85c8, 0x31fd32c2},
+ {0x85cc, 0x330732cc},
+ {0x85d0, 0x33193343},
+ {0x85d4, 0x331d3312},
+ {0x85d8, 0x31663316},
+ {0x85dc, 0x3365335b},
+ {0x85e0, 0x3379336f},
+ {0x85e4, 0x338d3383},
+ {0x85e8, 0x33a13397},
+ {0x85ec, 0x33b833ab},
+ {0x85f0, 0x33d733c9},
+ {0x85f4, 0x342333db},
+ {0x85f8, 0x343c343b},
+ {0x85fc, 0x3471346f},
+ {0x8600, 0xe493347c},
+ {0x8604, 0x20887410},
+ {0x8608, 0x140f0200},
+ {0x860c, 0x02002098},
+ {0x8610, 0x20a8140f},
+ {0x8614, 0x140f0200},
+ {0x8618, 0xe4df7430},
+ {0x861c, 0x74105b10},
+ {0x8620, 0x000120a0},
+ {0x8624, 0x140f140f},
+ {0x8628, 0x56e15507},
+ {0x862c, 0xe4c95c06},
+ {0x8630, 0x20a87410},
+ {0x8634, 0x140f0201},
+ {0x8638, 0xe4c95517},
+ {0x863c, 0x20a87410},
+ {0x8640, 0x140f0200},
+ {0x8644, 0x56c15517},
+ {0x8648, 0xe4c95c02},
+ {0x864c, 0x20a07410},
+ {0x8650, 0x140f0000},
+ {0x8654, 0x55071407},
+ {0x8658, 0xe47ee4c9},
+ {0x865c, 0x4686750a},
+ {0x8660, 0xe159e4d3},
+ {0x8664, 0xe4930001},
+ {0x8668, 0x20a87410},
+ {0x866c, 0x140f0200},
+ {0x8670, 0x02002098},
+ {0x8674, 0x2088140f},
+ {0x8678, 0x140f0200},
+ {0x867c, 0xe4df7430},
+ {0x8680, 0x74105b10},
+ {0x8684, 0x020120a8},
+ {0x8688, 0x2080140f},
+ {0x868c, 0x140f0000},
+ {0x8690, 0x56615507},
+ {0x8694, 0xe4c95c06},
+ {0x8698, 0x20887410},
+ {0x869c, 0x140f0200},
+ {0x86a0, 0xe4c95517},
+ {0x86a4, 0x20a87410},
+ {0x86a8, 0x140f0200},
+ {0x86ac, 0x56415517},
+ {0x86b0, 0xe4c95c02},
+ {0x86b4, 0x20807410},
+ {0x86b8, 0x140f0000},
+ {0x86bc, 0x55071407},
+ {0x86c0, 0xe47ee4c9},
+ {0x86c4, 0x468e7508},
+ {0x86c8, 0xe159e4d3},
+ {0x86cc, 0x5b10f025},
+ {0x86d0, 0x20a87410},
+ {0x86d4, 0x140f0201},
+ {0x86d8, 0x00002090},
+ {0x86dc, 0x5507140f},
+ {0x86e0, 0x5c065661},
+ {0x86e4, 0x7410e4c9},
+ {0x86e8, 0x02002098},
+ {0x86ec, 0x5517140f},
+ {0x86f0, 0x7410e4c9},
+ {0x86f4, 0x020020a8},
+ {0x86f8, 0x5517140f},
+ {0x86fc, 0x5c025641},
+ {0x8700, 0x7410e4c9},
+ {0x8704, 0x00002090},
+ {0x8708, 0x5507140f},
+ {0x870c, 0x7509e4c9},
+ {0x8710, 0xe4d34696},
+ {0x8714, 0x0001e159},
+ {0x8718, 0x74105b10},
+ {0x871c, 0x000020a0},
+ {0x8720, 0x5507140f},
+ {0x8724, 0xe4c95601},
+ {0x8728, 0x20a87410},
+ {0x872c, 0x140f0200},
+ {0x8730, 0xe4c95517},
+ {0x8734, 0x750ae47e},
+ {0x8738, 0xe4d34686},
+ {0x873c, 0x5500e159},
+ {0x8740, 0x5501e4c5},
+ {0x8744, 0xe4930001},
+ {0x8748, 0x5b10e4df},
+ {0x874c, 0x20807410},
+ {0x8750, 0x140f0000},
+ {0x8754, 0x02002098},
+ {0x8758, 0xf205140f},
+ {0x875c, 0x20a8f504},
+ {0x8760, 0x140f0200},
+ {0x8764, 0x56015507},
+ {0x8768, 0x7410e4c9},
+ {0x876c, 0x02002088},
+ {0x8770, 0x5517140f},
+ {0x8774, 0xe47ee4c9},
+ {0x8778, 0x468e7508},
+ {0x877c, 0xe159e4d3},
+ {0x8780, 0x7410f512},
+ {0x8784, 0x00002090},
+ {0x8788, 0x5507140f},
+ {0x878c, 0x7410e4c9},
+ {0x8790, 0x02002098},
+ {0x8794, 0x5517140f},
+ {0x8798, 0x7509e4c9},
+ {0x879c, 0xe4d34696},
+ {0x87a0, 0x0001e159},
+ {0x87a4, 0x46965b90},
+ {0x87a8, 0xe4c55500},
+ {0x87ac, 0x5b105501},
+ {0x87b0, 0x79000001},
+ {0x87b4, 0x57107420},
+ {0x87b8, 0x140f5700},
+ {0x87bc, 0x74309700},
+ {0x87c0, 0xe4930001},
+ {0x87c4, 0x0bbde4df},
+ {0x87c8, 0x0001e662},
+ {0x87cc, 0x5720e493},
+ {0x87d0, 0x540054fd},
+ {0x87d4, 0x70005700},
+ {0x87d8, 0x70c0e4dd},
+ {0x87dc, 0xe4a90001},
+ {0x87e0, 0x0001e512},
+ {0x87e4, 0x31abe493},
+ {0x87e8, 0xe6620023},
+ {0x87ec, 0x54ed0002},
+ {0x87f0, 0x00230baa},
+ {0x87f4, 0x0002e662},
+ {0x87f8, 0xe486e52e},
+ {0x87fc, 0xe4930001},
+ {0x8800, 0x002231a1},
+ {0x8804, 0x0002e662},
+ {0x8808, 0x0baa54ec},
+ {0x880c, 0xe6620022},
+ {0x8810, 0xe52e0002},
+ {0x8814, 0x0001e486},
+ {0x8818, 0x0baae493},
+ {0x881c, 0xe52e3194},
+ {0x8820, 0x0001e486},
+ {0x8824, 0x0babe493},
+ {0x8828, 0x6d0f6c67},
+ {0x882c, 0xe662e4df},
+ {0x8830, 0x6c8bfb04},
+ {0x8834, 0xe662e4df},
+ {0x8838, 0x6c95fa04},
+ {0x883c, 0xe662e4df},
+ {0x8840, 0x0bacfb06},
+ {0x8844, 0x6d0f6cb3},
+ {0x8848, 0xe662e4df},
+ {0x884c, 0xf904fa05},
+ {0x8850, 0xe4df6ccb},
+ {0x8854, 0xfb06e662},
+ {0x8858, 0x6cdb0bad},
+ {0x885c, 0xe4df6d0f},
+ {0x8860, 0x6cf5e662},
+ {0x8864, 0xe4df6d0f},
+ {0x8868, 0x6c0be662},
+ {0x886c, 0xe4df6d00},
+ {0x8870, 0xfb04e662},
+ {0x8874, 0xe4df6c25},
+ {0x8878, 0xf8b7e662},
+ {0x887c, 0xf904fa05},
+ {0x8880, 0xe4df6c35},
+ {0x8884, 0xfb04e662},
+ {0x8888, 0xe4df6c4d},
+ {0x888c, 0xf9bae662},
+ {0x8890, 0x6c6bfa04},
+ {0x8894, 0xe662e4df},
+ {0x8898, 0x6c75fb04},
+ {0x889c, 0xe662e4df},
+ {0x88a0, 0xe4df6c99},
+ {0x88a4, 0xfabce662},
+ {0x88a8, 0x57200ba8},
+ {0x88ac, 0x540054f0},
+ {0x88b0, 0x7c355700},
+ {0x88b4, 0x70007d00},
+ {0x88b8, 0x6d0e6cc5},
+ {0x88bc, 0xe662e4dd},
+ {0x88c0, 0xe4dd6cf5},
+ {0x88c4, 0x6c29e662},
+ {0x88c8, 0xe4dd6d0f},
+ {0x88cc, 0x0bb3e662},
+ {0x88d0, 0x54ed5720},
+ {0x88d4, 0x57005400},
+ {0x88d8, 0x7d0f7ccb},
+ {0x88dc, 0x6d006cd7},
+ {0x88e0, 0xe662e4dd},
+ {0x88e4, 0x6d016c0b},
+ {0x88e8, 0xe662e4dd},
+ {0x88ec, 0xe4dd6c3b},
+ {0x88f0, 0x70c0e662},
+ {0x88f4, 0xe486e52e},
+ {0x88f8, 0xe4a90001},
+ {0x88fc, 0x63424380},
+ {0x8900, 0x43006887},
+ {0x8904, 0x74100ba6},
+ {0x8908, 0x000121e8},
+ {0x890c, 0x6ec71658},
+ {0x8910, 0xe5126f0e},
+ {0x8914, 0x7410e667},
+ {0x8918, 0x000321e8},
+ {0x891c, 0x6eeb1658},
+ {0x8920, 0xe667e512},
+ {0x8924, 0x21e87410},
+ {0x8928, 0x16580005},
+ {0x892c, 0x6f0f6e13},
+ {0x8930, 0xe667e512},
+ {0x8934, 0x21e87410},
+ {0x8938, 0x16580007},
+ {0x893c, 0xe5126e3b},
+ {0x8940, 0x7410e667},
+ {0x8944, 0x000921e8},
+ {0x8948, 0x6e671658},
+ {0x894c, 0xe5126f0f},
+ {0x8950, 0x7410e667},
+ {0x8954, 0x000b21e8},
+ {0x8958, 0x6e8b1658},
+ {0x895c, 0xe667e512},
+ {0x8960, 0x21e87410},
+ {0x8964, 0x1658000d},
+ {0x8968, 0x6f0f6eb3},
+ {0x896c, 0xe667e512},
+ {0x8970, 0xfe08ff09},
+ {0x8974, 0x21e87410},
+ {0x8978, 0x1658000e},
+ {0x897c, 0xe5126ec7},
+ {0x8980, 0x7410e667},
+ {0x8984, 0x000f21e8},
+ {0x8988, 0x6edb1658},
+ {0x898c, 0xe5126f0f},
+ {0x8990, 0x7410e667},
+ {0x8994, 0x001021e8},
+ {0x8998, 0x6eef1658},
+ {0x899c, 0xe667e512},
+ {0x89a0, 0xfe02ff03},
+ {0x89a4, 0x7410e667},
+ {0x89a8, 0x001321e8},
+ {0x89ac, 0x6e111658},
+ {0x89b0, 0xe5126f00},
+ {0x89b4, 0xff03e667},
+ {0x89b8, 0xe667fe02},
+ {0x89bc, 0x21e87410},
+ {0x89c0, 0x16580014},
+ {0x89c4, 0xe5126e25},
+ {0x89c8, 0xfc48e667},
+ {0x89cc, 0xfe08ff09},
+ {0x89d0, 0x21e87410},
+ {0x89d4, 0x16580015},
+ {0x89d8, 0xe5126e39},
+ {0x89dc, 0x7410e667},
+ {0x89e0, 0x001621e8},
+ {0x89e4, 0x6e4d1658},
+ {0x89e8, 0xe667e512},
+ {0x89ec, 0x7410fd49},
+ {0x89f0, 0x001821e8},
+ {0x89f4, 0x6e751658},
+ {0x89f8, 0xe667e512},
+ {0x89fc, 0x21e87410},
+ {0x8a00, 0x1658001a},
+ {0x8a04, 0xe5126e99},
+ {0x8a08, 0xfe44e667},
+ {0x8a0c, 0x21e87410},
+ {0x8a10, 0x1658001c},
+ {0x8a14, 0xe5126ec5},
+ {0x8a18, 0x7410e667},
+ {0x8a1c, 0x001e21e8},
+ {0x8a20, 0x6eed1658},
+ {0x8a24, 0xe667e512},
+ {0x8a28, 0x21e87410},
+ {0x8a2c, 0x16580020},
+ {0x8a30, 0x6f016e15},
+ {0x8a34, 0xe667e512},
+ {0x8a38, 0x21e87410},
+ {0x8a3c, 0x16580022},
+ {0x8a40, 0xe5126e39},
+ {0x8a44, 0xe52ee667},
+ {0x8a48, 0x0001e49c},
+ {0x8a4c, 0x4380e4a9},
+ {0x8a50, 0x68806340},
+ {0x8a54, 0x0bac4300},
+ {0x8a58, 0x00223241},
+ {0x8a5c, 0x0002e667},
+ {0x8a60, 0x0baa54ec},
+ {0x8a64, 0xe6670022},
+ {0x8a68, 0xe52e0002},
+ {0x8a6c, 0x0001e49c},
+ {0x8a70, 0x4380e4a9},
+ {0x8a74, 0x68816340},
+ {0x8a78, 0x0baa4300},
+ {0x8a7c, 0xe52e3230},
+ {0x8a80, 0x0001e49c},
+ {0x8a84, 0x4380e4a9},
+ {0x8a88, 0x68826341},
+ {0x8a8c, 0x0baa4300},
+ {0x8a90, 0xe52e3221},
+ {0x8a94, 0x0001e49c},
+ {0x8a98, 0x42fc0004},
+ {0x8a9c, 0x60010007},
+ {0x8aa0, 0x42000004},
+ {0x8aa4, 0x62200007},
+ {0x8aa8, 0x00046200},
+ {0x8aac, 0x5b405501},
+ {0x8ab0, 0x00076605},
+ {0x8ab4, 0x63006200},
+ {0x8ab8, 0x0004e54f},
+ {0x8abc, 0x0a010900},
+ {0x8ac0, 0x0d000b40},
+ {0x8ac4, 0x00320e01},
+ {0x8ac8, 0x95090004},
+ {0x8acc, 0x790442fb},
+ {0x8ad0, 0x43804200},
+ {0x8ad4, 0x4d010007},
+ {0x8ad8, 0x43000004},
+ {0x8adc, 0x05620007},
+ {0x8ae0, 0x961d05a3},
+ {0x8ae4, 0x0004e54f},
+ {0x8ae8, 0x0007e4c5},
+ {0x8aec, 0x07a306a2},
+ {0x8af0, 0x0004e54f},
+ {0x8af4, 0xe53fe4c5},
+ {0x8af8, 0xe5470002},
+ {0x8afc, 0x00074380},
+ {0x8b00, 0x00044d00},
+ {0x8b04, 0x42fe4300},
+ {0x8b08, 0x42007900},
+ {0x8b0c, 0x00040001},
+ {0x8b10, 0x000742fc},
+ {0x8b14, 0x00046003},
+ {0x8b18, 0x32d24200},
+ {0x8b1c, 0x06a20007},
+ {0x8b20, 0x32fc07a3},
+ {0x8b24, 0xe32ee320},
+ {0x8b28, 0x0001e333},
+ {0x8b2c, 0xe333e320},
+ {0x8b30, 0xe3270001},
+ {0x8b34, 0xe333e32e},
+ {0x8b38, 0xe3270001},
+ {0x8b3c, 0x0001e333},
+ {0x8b40, 0x42fc0004},
+ {0x8b44, 0x60030007},
+ {0x8b48, 0x42000004},
+ {0x8b4c, 0x00040001},
+ {0x8b50, 0x000742fc},
+ {0x8b54, 0x00046001},
+ {0x8b58, 0x00014200},
+ {0x8b5c, 0x62200007},
+ {0x8b60, 0xe5476200},
+ {0x8b64, 0x00070001},
+ {0x8b68, 0x00046300},
+ {0x8b6c, 0x0a000900},
+ {0x8b70, 0x00320e01},
+ {0x8b74, 0x06a20007},
+ {0x8b78, 0xe559e54f},
+ {0x8b7c, 0x42fe0002},
+ {0x8b80, 0x42007900},
+ {0x8b84, 0x00050001},
+ {0x8b88, 0x00077700},
+ {0x8b8c, 0x00045200},
+ {0x8b90, 0x000742fe},
+ {0x8b94, 0x00046000},
+ {0x8b98, 0x43804200},
+ {0x8b9c, 0x61006000},
+ {0x8ba0, 0x63106201},
+ {0x8ba4, 0x00056804},
+ {0x8ba8, 0x55004100},
+ {0x8bac, 0x5c020007},
+ {0x8bb0, 0x43000004},
+ {0x8bb4, 0x00050001},
+ {0x8bb8, 0xe3c96c06},
+ {0x8bbc, 0xe3b8e3db},
+ {0x8bc0, 0xe423e567},
+ {0x8bc4, 0xe43ce56f},
+ {0x8bc8, 0xe3b80001},
+ {0x8bcc, 0x6c060005},
+ {0x8bd0, 0xe5e6e3c9},
+ {0x8bd4, 0xe423e567},
+ {0x8bd8, 0xe43ce56f},
+ {0x8bdc, 0x00050001},
+ {0x8be0, 0xe3c96c00},
+ {0x8be4, 0xe3b8e3db},
+ {0x8be8, 0xe423e582},
+ {0x8bec, 0xe43ce58a},
+ {0x8bf0, 0xe3b80001},
+ {0x8bf4, 0x6c000005},
+ {0x8bf8, 0xe5e6e3c9},
+ {0x8bfc, 0xe423e582},
+ {0x8c00, 0xe43ce58a},
+ {0x8c04, 0x00050001},
+ {0x8c08, 0xe3c96c04},
+ {0x8c0c, 0xe3b8e3db},
+ {0x8c10, 0xe423e59d},
+ {0x8c14, 0xe43ce5a5},
+ {0x8c18, 0xe3b80001},
+ {0x8c1c, 0x6c040005},
+ {0x8c20, 0xe5e6e3c9},
+ {0x8c24, 0xe423e59d},
+ {0x8c28, 0xe43ce5a5},
+ {0x8c2c, 0x00050001},
+ {0x8c30, 0xe3c96c02},
+ {0x8c34, 0xe3b8e3db},
+ {0x8c38, 0xe423e5b8},
+ {0x8c3c, 0xe43ce5c0},
+ {0x8c40, 0xe3b80001},
+ {0x8c44, 0x6c020005},
+ {0x8c48, 0xe5e6e3c9},
+ {0x8c4c, 0xe423e5b8},
+ {0x8c50, 0xe43ce5c0},
+ {0x8c54, 0x00040001},
+ {0x8c58, 0x60084380},
+ {0x8c5c, 0x6200610a},
+ {0x8c60, 0x000663ce},
+ {0x8c64, 0x7f006080},
+ {0x8c68, 0x43000004},
+ {0x8c6c, 0x0001e618},
+ {0x8c70, 0x55000007},
+ {0x8c74, 0x74200004},
+ {0x8c78, 0x77117901},
+ {0x8c7c, 0x57005710},
+ {0x8c80, 0x7430140f},
+ {0x8c84, 0x43800004},
+ {0x8c88, 0x72000007},
+ {0x8c8c, 0x43000004},
+ {0x8c90, 0x00040001},
+ {0x8c94, 0x00057420},
+ {0x8c98, 0x7e067700},
+ {0x8c9c, 0x73807388},
+ {0x8ca0, 0x140f8f00},
+ {0x8ca4, 0x74300004},
+ {0x8ca8, 0x73000005},
+ {0x8cac, 0xe5d30001},
+ {0x8cb0, 0x73000005},
+ {0x8cb4, 0x00040001},
+ {0x8cb8, 0xb1034380},
+ {0x8cbc, 0x7cdb0006},
+ {0x8cc0, 0x00079103},
+ {0x8cc4, 0x000440db},
+ {0x8cc8, 0xe5d34300},
+ {0x8ccc, 0x73800005},
+ {0x8cd0, 0x5d010006},
+ {0x8cd4, 0x62006002},
+ {0x8cd8, 0x0005e5f7},
+ {0x8cdc, 0x00077300},
+ {0x8ce0, 0x75787608},
+ {0x8ce4, 0x43800004},
+ {0x8ce8, 0x5e010007},
+ {0x8cec, 0x140a5e00},
+ {0x8cf0, 0x63800006},
+ {0x8cf4, 0x00077f00},
+ {0x8cf8, 0x4e204c3f},
+ {0x8cfc, 0x73047280},
+ {0x8d00, 0x140a7300},
+ {0x8d04, 0x00044d20},
+ {0x8d08, 0x00064300},
+ {0x8d0c, 0x00077402},
+ {0x8d10, 0x40004001},
+ {0x8d14, 0x0006ab00},
+ {0x8d18, 0x00077404},
+ {0x8d1c, 0x40004001},
+ {0x8d20, 0x140aab00},
+ {0x8d24, 0x43800004},
+ {0x8d28, 0x52800007},
+ {0x8d2c, 0x140a5200},
+ {0x8d30, 0x4d004c00},
+ {0x8d34, 0x00064e00},
+ {0x8d38, 0x63006080},
+ {0x8d3c, 0x43000004},
+ {0x8d40, 0x76000007},
+ {0x8d44, 0x00040001},
+ {0x8d48, 0xb1034380},
+ {0x8d4c, 0x7cdb0006},
+ {0x8d50, 0x00079103},
+ {0x8d54, 0x000440db},
+ {0x8d58, 0xe5d34300},
+ {0x8d5c, 0xe5f77e03},
+ {0x8d60, 0x43800004},
+ {0x8d64, 0x0006b103},
+ {0x8d68, 0x91037c5b},
+ {0x8d6c, 0x405b0007},
+ {0x8d70, 0x43000004},
+ {0x8d74, 0x00010001},
+ {0x8d78, 0x43800004},
+ {0x8d7c, 0x4e200007},
+ {0x8d80, 0x63800006},
+ {0x8d84, 0x5f807cdb},
+ {0x8d88, 0x43000004},
+ {0x8d8c, 0x76080007},
+ {0x8d90, 0x00057560},
+ {0x8d94, 0x00047380},
+ {0x8d98, 0x0005420e},
+ {0x8d9c, 0x92006c01},
+ {0x8da0, 0x6c001432},
+ {0x8da4, 0x42000004},
+ {0x8da8, 0x43800004},
+ {0x8dac, 0x5f000006},
+ {0x8db0, 0x73010007},
+ {0x8db4, 0x00047300},
+ {0x8db8, 0x0007420f},
+ {0x8dbc, 0x52005280},
+ {0x8dc0, 0x0004140a},
+ {0x8dc4, 0x00064200},
+ {0x8dc8, 0x7c5b6300},
+ {0x8dcc, 0x4e000007},
+ {0x8dd0, 0x43000004},
+ {0x8dd4, 0x73000005},
+ {0x8dd8, 0x76000007},
+ {0x8ddc, 0xe5fb0001},
+ {0x8de0, 0x00040001},
+ {0x8de4, 0x60004380},
+ {0x8de8, 0x62016100},
+ {0x8dec, 0x00066310},
+ {0x8df0, 0x00046000},
+ {0x8df4, 0x00014300},
+ {0x8df8, 0x0001e618},
+ {0x8dfc, 0x4e004f02},
+ {0x8e00, 0x52015302},
+ {0x8e04, 0x140f0001},
+ {0x8e08, 0x00019700},
+ {0x8e0c, 0x8a084380},
+ {0x8e10, 0x7800aa09},
+ {0x8e14, 0x7a007900},
+ {0x8e18, 0x43007b40},
+ {0x8e1c, 0x65010001},
+ {0x8e20, 0x67013489},
+ {0x8e24, 0x43803489},
+ {0x8e28, 0xaa058a04},
+ {0x8e2c, 0x00014300},
+ {0x8e30, 0x34966500},
+ {0x8e34, 0x34966700},
+ {0x8e38, 0x8a084380},
+ {0x8e3c, 0x7c00aa09},
+ {0x8e40, 0x7e007d00},
+ {0x8e44, 0x43007f40},
+ {0x8e48, 0x64010001},
+ {0x8e4c, 0x6601349f},
+ {0x8e50, 0x4380349f},
+ {0x8e54, 0xaa058a04},
+ {0x8e58, 0x00014300},
+ {0x8e5c, 0x34ac6400},
+ {0x8e60, 0x34ac6600},
+ {0x8e64, 0x7b484380},
+ {0x8e68, 0x79007a90},
+ {0x8e6c, 0x43007802},
+ {0x8e70, 0x34c95503},
+ {0x8e74, 0x7b384380},
+ {0x8e78, 0x43007a80},
+ {0x8e7c, 0x34c95513},
+ {0x8e80, 0x7b404380},
+ {0x8e84, 0x43007a00},
+ {0x8e88, 0x74015523},
+ {0x8e8c, 0x8e007400},
+ {0x8e90, 0x00070001},
+ {0x8e94, 0x00045230},
+ {0x8e98, 0x74307431},
+ {0x8e9c, 0x00078e00},
+ {0x8ea0, 0x00045220},
+ {0x8ea4, 0x57020001},
+ {0x8ea8, 0x8e005700},
+ {0x8eac, 0x42ef0001},
+ {0x8eb0, 0x56005610},
+ {0x8eb4, 0x8c004200},
+ {0x8eb8, 0x5b500001},
+ {0x8ebc, 0x5b2034e0},
+ {0x8ec0, 0x4e004f78},
+ {0x8ec4, 0x52015388},
+ {0x8ec8, 0x4e004f78},
+ {0x8ecc, 0x52015388},
+ {0x8ed0, 0x5480e4f2},
+ {0x8ed4, 0x54815400},
+ {0x8ed8, 0x54825400},
+ {0x8edc, 0xe4fd5400},
+ {0x8ee0, 0x3010bf1d},
+ {0x8ee4, 0xe4bae4b2},
+ {0x8ee8, 0xe4d3e4c0},
+ {0x8eec, 0x5523e65b},
+ {0x8ef0, 0x5525e4c9},
+ {0x8ef4, 0xe65be4d3},
+ {0x8ef8, 0x54bf0001},
+ {0x8efc, 0x54a354c0},
+ {0x8f00, 0x54a454c1},
+ {0x8f04, 0xbf074c18},
+ {0x8f08, 0x54a454c2},
+ {0x8f0c, 0x54c1bf04},
+ {0x8f10, 0xbf0154a3},
+ {0x8f14, 0x54dfe66c},
+ {0x8f18, 0x54bf0001},
+ {0x8f1c, 0x050a54e5},
+ {0x8f20, 0x000154df},
+ {0x8f24, 0x7b801657},
+ {0x8f28, 0x43807430},
+ {0x8f2c, 0x7e007f40},
+ {0x8f30, 0x7c027d00},
+ {0x8f34, 0x5b404300},
+ {0x8f38, 0x5c015501},
+ {0x8f3c, 0x5480e4d7},
+ {0x8f40, 0x54815400},
+ {0x8f44, 0x54825400},
+ {0x8f48, 0x7b005400},
+ {0x8f4c, 0xe4fd7410},
+ {0x8f50, 0x3010bfe5},
+ {0x8f54, 0x56005610},
+ {0x8f58, 0x00018c00},
+ {0x8f5c, 0x57005704},
+ {0x8f60, 0x57088e00},
+ {0x8f64, 0x8e005700},
+ {0x8f68, 0x57805781},
+ {0x8f6c, 0x43808e00},
+ {0x8f70, 0x5c010007},
+ {0x8f74, 0x14035c00},
+ {0x8f78, 0x43000004},
+ {0x8f7c, 0x427f0001},
+ {0x8f80, 0x62800007},
+ {0x8f84, 0x92006200},
+ {0x8f88, 0x42000004},
+ {0x8f8c, 0x427f0001},
+ {0x8f90, 0x63940007},
+ {0x8f94, 0x92006314},
+ {0x8f98, 0x42000004},
+ {0x8f9c, 0x00040001},
+ {0x8fa0, 0x790142fe},
+ {0x8fa4, 0x74204200},
+ {0x8fa8, 0x5710140f},
+ {0x8fac, 0x141f5700},
+ {0x8fb0, 0x00040001},
+ {0x8fb4, 0x790142fe},
+ {0x8fb8, 0x74204200},
+ {0x8fbc, 0x42bf140f},
+ {0x8fc0, 0x62400007},
+ {0x8fc4, 0x141f6200},
+ {0x8fc8, 0x42000004},
+ {0x8fcc, 0x00060001},
+ {0x8fd0, 0x60035d06},
+ {0x8fd4, 0x62016104},
+ {0x8fd8, 0x73100005},
+ {0x8fdc, 0x00040001},
+ {0x8fe0, 0x00074380},
+ {0x8fe4, 0x5e005e01},
+ {0x8fe8, 0xb103140a},
+ {0x8fec, 0x7f070006},
+ {0x8ff0, 0x00079103},
+ {0x8ff4, 0x00064307},
+ {0x8ff8, 0x5d025c00},
+ {0x8ffc, 0x00045e03},
+ {0x9000, 0x00014300},
+ {0x9004, 0x5d040006},
+ {0x9008, 0x61046000},
+ {0x900c, 0x00056201},
+ {0x9010, 0x00017310},
+ {0x9014, 0x43800004},
+ {0x9018, 0x5e010007},
+ {0x901c, 0x140a5e00},
+ {0x9020, 0x0006b103},
+ {0x9024, 0x91037fc6},
+ {0x9028, 0x43c60007},
+ {0x902c, 0x5c000006},
+ {0x9030, 0x5e035d02},
+ {0x9034, 0x43000004},
+ {0x9038, 0x00060001},
+ {0x903c, 0x60005d04},
+ {0x9040, 0x62016104},
+ {0x9044, 0x73100005},
+ {0x9048, 0x00040001},
+ {0x904c, 0x00074380},
+ {0x9050, 0x5e005e01},
+ {0x9054, 0xb103140a},
+ {0x9058, 0x7fc60006},
+ {0x905c, 0x00079103},
+ {0x9060, 0x000643c6},
+ {0x9064, 0x5d025c00},
+ {0x9068, 0x00045e03},
+ {0x906c, 0x00014300},
+ {0x9070, 0x5d000006},
+ {0x9074, 0x61006002},
+ {0x9078, 0x00056201},
+ {0x907c, 0x00017300},
+ {0x9080, 0x43800004},
+ {0x9084, 0x5e010007},
+ {0x9088, 0x140a5e00},
+ {0x908c, 0x0006b103},
+ {0x9090, 0x91037fc0},
+ {0x9094, 0x43c00007},
+ {0x9098, 0x5c000006},
+ {0x909c, 0x5e035d02},
+ {0x90a0, 0x43000004},
+ {0x90a4, 0x00050001},
+ {0x90a8, 0x00047e02},
+ {0x90ac, 0x000542f7},
+ {0x90b0, 0x00046c08},
+ {0x90b4, 0x00054270},
+ {0x90b8, 0x73807381},
+ {0x90bc, 0x00049300},
+ {0x90c0, 0x000542f7},
+ {0x90c4, 0x00046c00},
+ {0x90c8, 0x00014200},
+ {0x90cc, 0x43800004},
+ {0x90d0, 0x73040007},
+ {0x90d4, 0x14057300},
+ {0x90d8, 0x00047240},
+ {0x90dc, 0x00064300},
+ {0x90e0, 0x00077404},
+ {0x90e4, 0x40004001},
+ {0x90e8, 0x140fab00},
+ {0x90ec, 0xe64f0001},
+ {0x90f0, 0xe656e5fb},
+ {0x90f4, 0x00040001},
+ {0x90f8, 0x00047410},
+ {0x90fc, 0x42f04380},
+ {0x9100, 0x62080007},
+ {0x9104, 0x24206301},
+ {0x9108, 0x14c80000},
+ {0x910c, 0x00002428},
+ {0x9110, 0x1a4215f4},
+ {0x9114, 0x6300000b},
+ {0x9118, 0x42000004},
+ {0x911c, 0x74304300},
+ {0x9120, 0x4380140f},
+ {0x9124, 0x73080007},
+ {0x9128, 0x00047300},
+ {0x912c, 0x00014300},
+ {0x9130, 0x4bf00007},
+ {0x9134, 0x490b4a8f},
+ {0x9138, 0x4a8e48f1},
+ {0x913c, 0x48a5490a},
+ {0x9140, 0x49094a8d},
+ {0x9144, 0x4a8c487d},
+ {0x9148, 0x48754908},
+ {0x914c, 0x49074a8b},
+ {0x9150, 0x4a8a4889},
+ {0x9154, 0x48b74906},
+ {0x9158, 0x49054a89},
+ {0x915c, 0x4a8848fc},
+ {0x9160, 0x48564905},
+ {0x9164, 0x49044a87},
+ {0x9168, 0x4a8648c1},
+ {0x916c, 0x483d4904},
+ {0x9170, 0x49034a85},
+ {0x9174, 0x4a8448c7},
+ {0x9178, 0x485e4903},
+ {0x917c, 0x49024a83},
+ {0x9180, 0x4a8248ac},
+ {0x9184, 0x48624902},
+ {0x9188, 0x49024a81},
+ {0x918c, 0x4a804820},
+ {0x9190, 0x48004900},
+ {0x9194, 0x49014a90},
+ {0x9198, 0x4a10481f},
+ {0x919c, 0x00060001},
+ {0x91a0, 0x5f005f80},
+ {0x91a4, 0x00059900},
+ {0x91a8, 0x00017300},
+ {0x91ac, 0x63800006},
+ {0x91b0, 0x98006300},
+ {0x91b4, 0x549f0001},
+ {0x91b8, 0x5c015400},
+ {0x91bc, 0x540054df},
+ {0x91c0, 0x00015c02},
+ {0x91c4, 0x07145c01},
+ {0x91c8, 0x5c025400},
+ {0x91cc, 0x5c020001},
+ {0x91d0, 0x54000714},
+ {0x91d4, 0x00015c01},
+ {0x91d8, 0x4c184c98},
+ {0x91dc, 0x00080001},
+ {0x91e0, 0x5c020004},
+ {0x91e4, 0x09017430},
+ {0x91e8, 0x0ba60c01},
+ {0x91ec, 0x77800005},
+ {0x91f0, 0x52200007},
+ {0x91f4, 0x43800004},
+ {0x91f8, 0x610a6008},
+ {0x91fc, 0x63c26200},
+ {0x9200, 0x5c000007},
+ {0x9204, 0x43000004},
+ {0x9208, 0x00000001},
+ {0x8080, 0x00000004},
+ {0x8080, 0x00000000},
+ {0x8088, 0x00000000},
+};
+
+static const struct rtw89_txpwr_byrate_cfg rtw89_8852c_txpwr_byrate[] = {
+ { 0, 0, 0, 0, 4, 0x50505050, },
+ { 0, 0, 1, 0, 4, 0x50505050, },
+ { 0, 0, 1, 4, 4, 0x484c5050, },
+ { 0, 0, 2, 0, 4, 0x50505050, },
+ { 0, 0, 2, 4, 4, 0x44484c50, },
+ { 0, 0, 2, 8, 4, 0x34383c40, },
+ { 0, 0, 3, 0, 4, 0x50505050, },
+ { 0, 1, 2, 0, 4, 0x50505050, },
+ { 0, 1, 2, 4, 4, 0x44484c50, },
+ { 0, 1, 2, 8, 4, 0x34383c40, },
+ { 0, 1, 3, 0, 4, 0x50505050, },
+ { 0, 0, 4, 1, 4, 0x00000000, },
+ { 0, 0, 4, 0, 1, 0x00000000, },
+ { 1, 0, 1, 0, 4, 0x5054585c, },
+ { 1, 0, 1, 4, 4, 0x4044484c, },
+ { 1, 0, 2, 0, 4, 0x4c505458, },
+ { 1, 0, 2, 4, 4, 0x3c404448, },
+ { 1, 0, 2, 8, 4, 0x2c303438, },
+ { 1, 0, 3, 0, 4, 0x3c40484c, },
+ { 1, 1, 2, 0, 4, 0x4c505458, },
+ { 1, 1, 2, 4, 4, 0x3c404448, },
+ { 1, 1, 2, 8, 4, 0x2c303438, },
+ { 1, 1, 3, 0, 4, 0x3c40484c, },
+ { 1, 0, 4, 0, 4, 0x00000000, },
+ { 2, 0, 1, 0, 4, 0x5054585c, },
+ { 2, 0, 1, 4, 4, 0x4044484c, },
+ { 2, 0, 2, 0, 4, 0x4c505458, },
+ { 2, 0, 2, 4, 4, 0x3c404448, },
+ { 2, 0, 2, 8, 4, 0x2c303438, },
+ { 2, 0, 3, 0, 4, 0x3c40484c, },
+ { 2, 1, 2, 0, 4, 0x4c505458, },
+ { 2, 1, 2, 4, 4, 0x3c404448, },
+ { 2, 1, 2, 8, 4, 0x2c303438, },
+ { 2, 1, 3, 0, 4, 0x3c40484c, },
+ { 2, 0, 4, 0, 4, 0x00000000, },
+};
+
+static const s8 _txpwr_track_delta_swingidx_6gb_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+};
+
+static const s8 _txpwr_track_delta_swingidx_6gb_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
+ 11, 12, 12, 13, 14, 14, 15, 15, 16, 17, 17, 18},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16},
+};
+
+static const s8 _txpwr_track_delta_swingidx_6ga_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+};
+
+static const s8 _txpwr_track_delta_swingidx_6ga_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8},
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3},
+ {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14},
+};
+
+static const s8 _txpwr_track_delta_swingidx_2gb_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static const s8 _txpwr_track_delta_swingidx_2gb_p[] = {
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2
+};
+
+static const s8 _txpwr_track_delta_swingidx_2ga_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2,
+ -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3
+};
+
+static const s8 _txpwr_track_delta_swingidx_2ga_p[] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2,
+ -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5
+};
+
+const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
+ [RTW89_REGD_NUM] = {
+ [0][0][RTW89_ACMA] = 0,
+ [0][0][RTW89_ETSI] = 0,
+ [0][0][RTW89_FCC] = 1,
+ [0][0][RTW89_IC] = 1,
+ [0][0][RTW89_MKK] = 0,
+ [0][1][RTW89_ACMA] = 0,
+ [0][1][RTW89_ETSI] = 0,
+ [0][1][RTW89_FCC] = 3,
+ [0][1][RTW89_IC] = 3,
+ [0][1][RTW89_MKK] = 0,
+ [1][1][RTW89_ACMA] = 0,
+ [1][1][RTW89_ETSI] = 0,
+ [1][1][RTW89_FCC] = 3,
+ [1][1][RTW89_IC] = 3,
+ [1][1][RTW89_MKK] = 0,
+ [2][1][RTW89_FCC] = 1,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][0][0][RTW89_WW][0] = 60,
+ [0][0][0][0][RTW89_WW][1] = 60,
+ [0][0][0][0][RTW89_WW][2] = 60,
+ [0][0][0][0][RTW89_WW][3] = 60,
+ [0][0][0][0][RTW89_WW][4] = 60,
+ [0][0][0][0][RTW89_WW][5] = 60,
+ [0][0][0][0][RTW89_WW][6] = 60,
+ [0][0][0][0][RTW89_WW][7] = 60,
+ [0][0][0][0][RTW89_WW][8] = 60,
+ [0][0][0][0][RTW89_WW][9] = 60,
+ [0][0][0][0][RTW89_WW][10] = 60,
+ [0][0][0][0][RTW89_WW][11] = 60,
+ [0][0][0][0][RTW89_WW][12] = 58,
+ [0][0][0][0][RTW89_WW][13] = 74,
+ [0][1][0][0][RTW89_WW][0] = 48,
+ [0][1][0][0][RTW89_WW][1] = 48,
+ [0][1][0][0][RTW89_WW][2] = 48,
+ [0][1][0][0][RTW89_WW][3] = 48,
+ [0][1][0][0][RTW89_WW][4] = 48,
+ [0][1][0][0][RTW89_WW][5] = 48,
+ [0][1][0][0][RTW89_WW][6] = 48,
+ [0][1][0][0][RTW89_WW][7] = 48,
+ [0][1][0][0][RTW89_WW][8] = 48,
+ [0][1][0][0][RTW89_WW][9] = 48,
+ [0][1][0][0][RTW89_WW][10] = 48,
+ [0][1][0][0][RTW89_WW][11] = 48,
+ [0][1][0][0][RTW89_WW][12] = 44,
+ [0][1][0][0][RTW89_WW][13] = 62,
+ [1][0][0][0][RTW89_WW][0] = 0,
+ [1][0][0][0][RTW89_WW][1] = 0,
+ [1][0][0][0][RTW89_WW][2] = 52,
+ [1][0][0][0][RTW89_WW][3] = 52,
+ [1][0][0][0][RTW89_WW][4] = 52,
+ [1][0][0][0][RTW89_WW][5] = 60,
+ [1][0][0][0][RTW89_WW][6] = 52,
+ [1][0][0][0][RTW89_WW][7] = 52,
+ [1][0][0][0][RTW89_WW][8] = 52,
+ [1][0][0][0][RTW89_WW][9] = 44,
+ [1][0][0][0][RTW89_WW][10] = 32,
+ [1][0][0][0][RTW89_WW][11] = 0,
+ [1][0][0][0][RTW89_WW][12] = 0,
+ [1][0][0][0][RTW89_WW][13] = 0,
+ [1][1][0][0][RTW89_WW][0] = 0,
+ [1][1][0][0][RTW89_WW][1] = 0,
+ [1][1][0][0][RTW89_WW][2] = 48,
+ [1][1][0][0][RTW89_WW][3] = 48,
+ [1][1][0][0][RTW89_WW][4] = 48,
+ [1][1][0][0][RTW89_WW][5] = 48,
+ [1][1][0][0][RTW89_WW][6] = 36,
+ [1][1][0][0][RTW89_WW][7] = 36,
+ [1][1][0][0][RTW89_WW][8] = 36,
+ [1][1][0][0][RTW89_WW][9] = 32,
+ [1][1][0][0][RTW89_WW][10] = 32,
+ [1][1][0][0][RTW89_WW][11] = 0,
+ [1][1][0][0][RTW89_WW][12] = 0,
+ [1][1][0][0][RTW89_WW][13] = 0,
+ [0][0][1][0][RTW89_WW][0] = 60,
+ [0][0][1][0][RTW89_WW][1] = 60,
+ [0][0][1][0][RTW89_WW][2] = 60,
+ [0][0][1][0][RTW89_WW][3] = 60,
+ [0][0][1][0][RTW89_WW][4] = 60,
+ [0][0][1][0][RTW89_WW][5] = 60,
+ [0][0][1][0][RTW89_WW][6] = 60,
+ [0][0][1][0][RTW89_WW][7] = 60,
+ [0][0][1][0][RTW89_WW][8] = 60,
+ [0][0][1][0][RTW89_WW][9] = 60,
+ [0][0][1][0][RTW89_WW][10] = 60,
+ [0][0][1][0][RTW89_WW][11] = 56,
+ [0][0][1][0][RTW89_WW][12] = 52,
+ [0][0][1][0][RTW89_WW][13] = 0,
+ [0][1][1][0][RTW89_WW][0] = 48,
+ [0][1][1][0][RTW89_WW][1] = 48,
+ [0][1][1][0][RTW89_WW][2] = 48,
+ [0][1][1][0][RTW89_WW][3] = 48,
+ [0][1][1][0][RTW89_WW][4] = 48,
+ [0][1][1][0][RTW89_WW][5] = 48,
+ [0][1][1][0][RTW89_WW][6] = 48,
+ [0][1][1][0][RTW89_WW][7] = 48,
+ [0][1][1][0][RTW89_WW][8] = 48,
+ [0][1][1][0][RTW89_WW][9] = 48,
+ [0][1][1][0][RTW89_WW][10] = 48,
+ [0][1][1][0][RTW89_WW][11] = 48,
+ [0][1][1][0][RTW89_WW][12] = 44,
+ [0][1][1][0][RTW89_WW][13] = 0,
+ [0][0][2][0][RTW89_WW][0] = 60,
+ [0][0][2][0][RTW89_WW][1] = 60,
+ [0][0][2][0][RTW89_WW][2] = 60,
+ [0][0][2][0][RTW89_WW][3] = 60,
+ [0][0][2][0][RTW89_WW][4] = 60,
+ [0][0][2][0][RTW89_WW][5] = 60,
+ [0][0][2][0][RTW89_WW][6] = 60,
+ [0][0][2][0][RTW89_WW][7] = 60,
+ [0][0][2][0][RTW89_WW][8] = 60,
+ [0][0][2][0][RTW89_WW][9] = 60,
+ [0][0][2][0][RTW89_WW][10] = 60,
+ [0][0][2][0][RTW89_WW][11] = 56,
+ [0][0][2][0][RTW89_WW][12] = 52,
+ [0][0][2][0][RTW89_WW][13] = 0,
+ [0][1][2][0][RTW89_WW][0] = 48,
+ [0][1][2][0][RTW89_WW][1] = 48,
+ [0][1][2][0][RTW89_WW][2] = 48,
+ [0][1][2][0][RTW89_WW][3] = 48,
+ [0][1][2][0][RTW89_WW][4] = 48,
+ [0][1][2][0][RTW89_WW][5] = 48,
+ [0][1][2][0][RTW89_WW][6] = 48,
+ [0][1][2][0][RTW89_WW][7] = 48,
+ [0][1][2][0][RTW89_WW][8] = 48,
+ [0][1][2][0][RTW89_WW][9] = 48,
+ [0][1][2][0][RTW89_WW][10] = 48,
+ [0][1][2][0][RTW89_WW][11] = 48,
+ [0][1][2][0][RTW89_WW][12] = 44,
+ [0][1][2][0][RTW89_WW][13] = 0,
+ [0][1][2][1][RTW89_WW][0] = 36,
+ [0][1][2][1][RTW89_WW][1] = 36,
+ [0][1][2][1][RTW89_WW][2] = 36,
+ [0][1][2][1][RTW89_WW][3] = 36,
+ [0][1][2][1][RTW89_WW][4] = 36,
+ [0][1][2][1][RTW89_WW][5] = 36,
+ [0][1][2][1][RTW89_WW][6] = 36,
+ [0][1][2][1][RTW89_WW][7] = 36,
+ [0][1][2][1][RTW89_WW][8] = 36,
+ [0][1][2][1][RTW89_WW][9] = 36,
+ [0][1][2][1][RTW89_WW][10] = 36,
+ [0][1][2][1][RTW89_WW][11] = 36,
+ [0][1][2][1][RTW89_WW][12] = 36,
+ [0][1][2][1][RTW89_WW][13] = 0,
+ [1][0][2][0][RTW89_WW][0] = 0,
+ [1][0][2][0][RTW89_WW][1] = 0,
+ [1][0][2][0][RTW89_WW][2] = 60,
+ [1][0][2][0][RTW89_WW][3] = 60,
+ [1][0][2][0][RTW89_WW][4] = 60,
+ [1][0][2][0][RTW89_WW][5] = 60,
+ [1][0][2][0][RTW89_WW][6] = 60,
+ [1][0][2][0][RTW89_WW][7] = 60,
+ [1][0][2][0][RTW89_WW][8] = 60,
+ [1][0][2][0][RTW89_WW][9] = 60,
+ [1][0][2][0][RTW89_WW][10] = 60,
+ [1][0][2][0][RTW89_WW][11] = 0,
+ [1][0][2][0][RTW89_WW][12] = 0,
+ [1][0][2][0][RTW89_WW][13] = 0,
+ [1][1][2][0][RTW89_WW][0] = 0,
+ [1][1][2][0][RTW89_WW][1] = 0,
+ [1][1][2][0][RTW89_WW][2] = 48,
+ [1][1][2][0][RTW89_WW][3] = 48,
+ [1][1][2][0][RTW89_WW][4] = 48,
+ [1][1][2][0][RTW89_WW][5] = 48,
+ [1][1][2][0][RTW89_WW][6] = 48,
+ [1][1][2][0][RTW89_WW][7] = 48,
+ [1][1][2][0][RTW89_WW][8] = 48,
+ [1][1][2][0][RTW89_WW][9] = 44,
+ [1][1][2][0][RTW89_WW][10] = 40,
+ [1][1][2][0][RTW89_WW][11] = 0,
+ [1][1][2][0][RTW89_WW][12] = 0,
+ [1][1][2][0][RTW89_WW][13] = 0,
+ [1][1][2][1][RTW89_WW][0] = 0,
+ [1][1][2][1][RTW89_WW][1] = 0,
+ [1][1][2][1][RTW89_WW][2] = 36,
+ [1][1][2][1][RTW89_WW][3] = 36,
+ [1][1][2][1][RTW89_WW][4] = 36,
+ [1][1][2][1][RTW89_WW][5] = 36,
+ [1][1][2][1][RTW89_WW][6] = 36,
+ [1][1][2][1][RTW89_WW][7] = 36,
+ [1][1][2][1][RTW89_WW][8] = 36,
+ [1][1][2][1][RTW89_WW][9] = 36,
+ [1][1][2][1][RTW89_WW][10] = 36,
+ [1][1][2][1][RTW89_WW][11] = 0,
+ [1][1][2][1][RTW89_WW][12] = 0,
+ [1][1][2][1][RTW89_WW][13] = 0,
+ [0][0][0][0][RTW89_FCC][0] = 80,
+ [0][0][0][0][RTW89_ETSI][0] = 60,
+ [0][0][0][0][RTW89_MKK][0] = 72,
+ [0][0][0][0][RTW89_IC][0] = 80,
+ [0][0][0][0][RTW89_ACMA][0] = 60,
+ [0][0][0][0][RTW89_FCC][1] = 80,
+ [0][0][0][0][RTW89_ETSI][1] = 60,
+ [0][0][0][0][RTW89_MKK][1] = 72,
+ [0][0][0][0][RTW89_IC][1] = 80,
+ [0][0][0][0][RTW89_ACMA][1] = 60,
+ [0][0][0][0][RTW89_FCC][2] = 80,
+ [0][0][0][0][RTW89_ETSI][2] = 60,
+ [0][0][0][0][RTW89_MKK][2] = 72,
+ [0][0][0][0][RTW89_IC][2] = 80,
+ [0][0][0][0][RTW89_ACMA][2] = 60,
+ [0][0][0][0][RTW89_FCC][3] = 80,
+ [0][0][0][0][RTW89_ETSI][3] = 60,
+ [0][0][0][0][RTW89_MKK][3] = 72,
+ [0][0][0][0][RTW89_IC][3] = 80,
+ [0][0][0][0][RTW89_ACMA][3] = 60,
+ [0][0][0][0][RTW89_FCC][4] = 80,
+ [0][0][0][0][RTW89_ETSI][4] = 60,
+ [0][0][0][0][RTW89_MKK][4] = 72,
+ [0][0][0][0][RTW89_IC][4] = 80,
+ [0][0][0][0][RTW89_ACMA][4] = 60,
+ [0][0][0][0][RTW89_FCC][5] = 80,
+ [0][0][0][0][RTW89_ETSI][5] = 60,
+ [0][0][0][0][RTW89_MKK][5] = 72,
+ [0][0][0][0][RTW89_IC][5] = 80,
+ [0][0][0][0][RTW89_ACMA][5] = 60,
+ [0][0][0][0][RTW89_FCC][6] = 80,
+ [0][0][0][0][RTW89_ETSI][6] = 60,
+ [0][0][0][0][RTW89_MKK][6] = 72,
+ [0][0][0][0][RTW89_IC][6] = 80,
+ [0][0][0][0][RTW89_ACMA][6] = 60,
+ [0][0][0][0][RTW89_FCC][7] = 80,
+ [0][0][0][0][RTW89_ETSI][7] = 60,
+ [0][0][0][0][RTW89_MKK][7] = 72,
+ [0][0][0][0][RTW89_IC][7] = 80,
+ [0][0][0][0][RTW89_ACMA][7] = 60,
+ [0][0][0][0][RTW89_FCC][8] = 80,
+ [0][0][0][0][RTW89_ETSI][8] = 60,
+ [0][0][0][0][RTW89_MKK][8] = 72,
+ [0][0][0][0][RTW89_IC][8] = 80,
+ [0][0][0][0][RTW89_ACMA][8] = 60,
+ [0][0][0][0][RTW89_FCC][9] = 80,
+ [0][0][0][0][RTW89_ETSI][9] = 60,
+ [0][0][0][0][RTW89_MKK][9] = 72,
+ [0][0][0][0][RTW89_IC][9] = 80,
+ [0][0][0][0][RTW89_ACMA][9] = 60,
+ [0][0][0][0][RTW89_FCC][10] = 80,
+ [0][0][0][0][RTW89_ETSI][10] = 60,
+ [0][0][0][0][RTW89_MKK][10] = 72,
+ [0][0][0][0][RTW89_IC][10] = 80,
+ [0][0][0][0][RTW89_ACMA][10] = 60,
+ [0][0][0][0][RTW89_FCC][11] = 72,
+ [0][0][0][0][RTW89_ETSI][11] = 60,
+ [0][0][0][0][RTW89_MKK][11] = 72,
+ [0][0][0][0][RTW89_IC][11] = 72,
+ [0][0][0][0][RTW89_ACMA][11] = 60,
+ [0][0][0][0][RTW89_FCC][12] = 58,
+ [0][0][0][0][RTW89_ETSI][12] = 60,
+ [0][0][0][0][RTW89_MKK][12] = 72,
+ [0][0][0][0][RTW89_IC][12] = 58,
+ [0][0][0][0][RTW89_ACMA][12] = 60,
+ [0][0][0][0][RTW89_FCC][13] = 127,
+ [0][0][0][0][RTW89_ETSI][13] = 127,
+ [0][0][0][0][RTW89_MKK][13] = 74,
+ [0][0][0][0][RTW89_IC][13] = 127,
+ [0][0][0][0][RTW89_ACMA][13] = 127,
+ [0][1][0][0][RTW89_FCC][0] = 76,
+ [0][1][0][0][RTW89_ETSI][0] = 48,
+ [0][1][0][0][RTW89_MKK][0] = 60,
+ [0][1][0][0][RTW89_IC][0] = 76,
+ [0][1][0][0][RTW89_ACMA][0] = 48,
+ [0][1][0][0][RTW89_FCC][1] = 76,
+ [0][1][0][0][RTW89_ETSI][1] = 48,
+ [0][1][0][0][RTW89_MKK][1] = 60,
+ [0][1][0][0][RTW89_IC][1] = 76,
+ [0][1][0][0][RTW89_ACMA][1] = 48,
+ [0][1][0][0][RTW89_FCC][2] = 76,
+ [0][1][0][0][RTW89_ETSI][2] = 48,
+ [0][1][0][0][RTW89_MKK][2] = 60,
+ [0][1][0][0][RTW89_IC][2] = 76,
+ [0][1][0][0][RTW89_ACMA][2] = 48,
+ [0][1][0][0][RTW89_FCC][3] = 76,
+ [0][1][0][0][RTW89_ETSI][3] = 48,
+ [0][1][0][0][RTW89_MKK][3] = 60,
+ [0][1][0][0][RTW89_IC][3] = 76,
+ [0][1][0][0][RTW89_ACMA][3] = 48,
+ [0][1][0][0][RTW89_FCC][4] = 76,
+ [0][1][0][0][RTW89_ETSI][4] = 48,
+ [0][1][0][0][RTW89_MKK][4] = 60,
+ [0][1][0][0][RTW89_IC][4] = 76,
+ [0][1][0][0][RTW89_ACMA][4] = 48,
+ [0][1][0][0][RTW89_FCC][5] = 76,
+ [0][1][0][0][RTW89_ETSI][5] = 48,
+ [0][1][0][0][RTW89_MKK][5] = 60,
+ [0][1][0][0][RTW89_IC][5] = 76,
+ [0][1][0][0][RTW89_ACMA][5] = 48,
+ [0][1][0][0][RTW89_FCC][6] = 76,
+ [0][1][0][0][RTW89_ETSI][6] = 48,
+ [0][1][0][0][RTW89_MKK][6] = 60,
+ [0][1][0][0][RTW89_IC][6] = 76,
+ [0][1][0][0][RTW89_ACMA][6] = 48,
+ [0][1][0][0][RTW89_FCC][7] = 76,
+ [0][1][0][0][RTW89_ETSI][7] = 48,
+ [0][1][0][0][RTW89_MKK][7] = 60,
+ [0][1][0][0][RTW89_IC][7] = 76,
+ [0][1][0][0][RTW89_ACMA][7] = 48,
+ [0][1][0][0][RTW89_FCC][8] = 76,
+ [0][1][0][0][RTW89_ETSI][8] = 48,
+ [0][1][0][0][RTW89_MKK][8] = 60,
+ [0][1][0][0][RTW89_IC][8] = 76,
+ [0][1][0][0][RTW89_ACMA][8] = 48,
+ [0][1][0][0][RTW89_FCC][9] = 76,
+ [0][1][0][0][RTW89_ETSI][9] = 48,
+ [0][1][0][0][RTW89_MKK][9] = 60,
+ [0][1][0][0][RTW89_IC][9] = 76,
+ [0][1][0][0][RTW89_ACMA][9] = 48,
+ [0][1][0][0][RTW89_FCC][10] = 76,
+ [0][1][0][0][RTW89_ETSI][10] = 48,
+ [0][1][0][0][RTW89_MKK][10] = 60,
+ [0][1][0][0][RTW89_IC][10] = 76,
+ [0][1][0][0][RTW89_ACMA][10] = 48,
+ [0][1][0][0][RTW89_FCC][11] = 56,
+ [0][1][0][0][RTW89_ETSI][11] = 48,
+ [0][1][0][0][RTW89_MKK][11] = 60,
+ [0][1][0][0][RTW89_IC][11] = 56,
+ [0][1][0][0][RTW89_ACMA][11] = 48,
+ [0][1][0][0][RTW89_FCC][12] = 44,
+ [0][1][0][0][RTW89_ETSI][12] = 48,
+ [0][1][0][0][RTW89_MKK][12] = 60,
+ [0][1][0][0][RTW89_IC][12] = 44,
+ [0][1][0][0][RTW89_ACMA][12] = 48,
+ [0][1][0][0][RTW89_FCC][13] = 127,
+ [0][1][0][0][RTW89_ETSI][13] = 127,
+ [0][1][0][0][RTW89_MKK][13] = 62,
+ [0][1][0][0][RTW89_IC][13] = 127,
+ [0][1][0][0][RTW89_ACMA][13] = 127,
+ [1][0][0][0][RTW89_FCC][0] = 127,
+ [1][0][0][0][RTW89_ETSI][0] = 127,
+ [1][0][0][0][RTW89_MKK][0] = 127,
+ [1][0][0][0][RTW89_IC][0] = 127,
+ [1][0][0][0][RTW89_ACMA][0] = 127,
+ [1][0][0][0][RTW89_FCC][1] = 127,
+ [1][0][0][0][RTW89_ETSI][1] = 127,
+ [1][0][0][0][RTW89_MKK][1] = 127,
+ [1][0][0][0][RTW89_IC][1] = 127,
+ [1][0][0][0][RTW89_ACMA][1] = 127,
+ [1][0][0][0][RTW89_FCC][2] = 52,
+ [1][0][0][0][RTW89_ETSI][2] = 60,
+ [1][0][0][0][RTW89_MKK][2] = 72,
+ [1][0][0][0][RTW89_IC][2] = 52,
+ [1][0][0][0][RTW89_ACMA][2] = 60,
+ [1][0][0][0][RTW89_FCC][3] = 52,
+ [1][0][0][0][RTW89_ETSI][3] = 60,
+ [1][0][0][0][RTW89_MKK][3] = 72,
+ [1][0][0][0][RTW89_IC][3] = 52,
+ [1][0][0][0][RTW89_ACMA][3] = 60,
+ [1][0][0][0][RTW89_FCC][4] = 52,
+ [1][0][0][0][RTW89_ETSI][4] = 60,
+ [1][0][0][0][RTW89_MKK][4] = 72,
+ [1][0][0][0][RTW89_IC][4] = 52,
+ [1][0][0][0][RTW89_ACMA][4] = 60,
+ [1][0][0][0][RTW89_FCC][5] = 68,
+ [1][0][0][0][RTW89_ETSI][5] = 60,
+ [1][0][0][0][RTW89_MKK][5] = 72,
+ [1][0][0][0][RTW89_IC][5] = 68,
+ [1][0][0][0][RTW89_ACMA][5] = 60,
+ [1][0][0][0][RTW89_FCC][6] = 52,
+ [1][0][0][0][RTW89_ETSI][6] = 60,
+ [1][0][0][0][RTW89_MKK][6] = 72,
+ [1][0][0][0][RTW89_IC][6] = 52,
+ [1][0][0][0][RTW89_ACMA][6] = 60,
+ [1][0][0][0][RTW89_FCC][7] = 52,
+ [1][0][0][0][RTW89_ETSI][7] = 60,
+ [1][0][0][0][RTW89_MKK][7] = 72,
+ [1][0][0][0][RTW89_IC][7] = 52,
+ [1][0][0][0][RTW89_ACMA][7] = 60,
+ [1][0][0][0][RTW89_FCC][8] = 52,
+ [1][0][0][0][RTW89_ETSI][8] = 60,
+ [1][0][0][0][RTW89_MKK][8] = 72,
+ [1][0][0][0][RTW89_IC][8] = 52,
+ [1][0][0][0][RTW89_ACMA][8] = 60,
+ [1][0][0][0][RTW89_FCC][9] = 44,
+ [1][0][0][0][RTW89_ETSI][9] = 60,
+ [1][0][0][0][RTW89_MKK][9] = 72,
+ [1][0][0][0][RTW89_IC][9] = 44,
+ [1][0][0][0][RTW89_ACMA][9] = 60,
+ [1][0][0][0][RTW89_FCC][10] = 32,
+ [1][0][0][0][RTW89_ETSI][10] = 60,
+ [1][0][0][0][RTW89_MKK][10] = 70,
+ [1][0][0][0][RTW89_IC][10] = 32,
+ [1][0][0][0][RTW89_ACMA][10] = 60,
+ [1][0][0][0][RTW89_FCC][11] = 127,
+ [1][0][0][0][RTW89_ETSI][11] = 127,
+ [1][0][0][0][RTW89_MKK][11] = 127,
+ [1][0][0][0][RTW89_IC][11] = 127,
+ [1][0][0][0][RTW89_ACMA][11] = 127,
+ [1][0][0][0][RTW89_FCC][12] = 127,
+ [1][0][0][0][RTW89_ETSI][12] = 127,
+ [1][0][0][0][RTW89_MKK][12] = 127,
+ [1][0][0][0][RTW89_IC][12] = 127,
+ [1][0][0][0][RTW89_ACMA][12] = 127,
+ [1][0][0][0][RTW89_FCC][13] = 127,
+ [1][0][0][0][RTW89_ETSI][13] = 127,
+ [1][0][0][0][RTW89_MKK][13] = 127,
+ [1][0][0][0][RTW89_IC][13] = 127,
+ [1][0][0][0][RTW89_ACMA][13] = 127,
+ [1][1][0][0][RTW89_FCC][0] = 127,
+ [1][1][0][0][RTW89_ETSI][0] = 127,
+ [1][1][0][0][RTW89_MKK][0] = 127,
+ [1][1][0][0][RTW89_IC][0] = 127,
+ [1][1][0][0][RTW89_ACMA][0] = 127,
+ [1][1][0][0][RTW89_FCC][1] = 127,
+ [1][1][0][0][RTW89_ETSI][1] = 127,
+ [1][1][0][0][RTW89_MKK][1] = 127,
+ [1][1][0][0][RTW89_IC][1] = 127,
+ [1][1][0][0][RTW89_ACMA][1] = 127,
+ [1][1][0][0][RTW89_FCC][2] = 48,
+ [1][1][0][0][RTW89_ETSI][2] = 48,
+ [1][1][0][0][RTW89_MKK][2] = 60,
+ [1][1][0][0][RTW89_IC][2] = 48,
+ [1][1][0][0][RTW89_ACMA][2] = 48,
+ [1][1][0][0][RTW89_FCC][3] = 48,
+ [1][1][0][0][RTW89_ETSI][3] = 48,
+ [1][1][0][0][RTW89_MKK][3] = 60,
+ [1][1][0][0][RTW89_IC][3] = 48,
+ [1][1][0][0][RTW89_ACMA][3] = 48,
+ [1][1][0][0][RTW89_FCC][4] = 48,
+ [1][1][0][0][RTW89_ETSI][4] = 48,
+ [1][1][0][0][RTW89_MKK][4] = 60,
+ [1][1][0][0][RTW89_IC][4] = 48,
+ [1][1][0][0][RTW89_ACMA][4] = 48,
+ [1][1][0][0][RTW89_FCC][5] = 64,
+ [1][1][0][0][RTW89_ETSI][5] = 48,
+ [1][1][0][0][RTW89_MKK][5] = 60,
+ [1][1][0][0][RTW89_IC][5] = 64,
+ [1][1][0][0][RTW89_ACMA][5] = 48,
+ [1][1][0][0][RTW89_FCC][6] = 36,
+ [1][1][0][0][RTW89_ETSI][6] = 48,
+ [1][1][0][0][RTW89_MKK][6] = 60,
+ [1][1][0][0][RTW89_IC][6] = 36,
+ [1][1][0][0][RTW89_ACMA][6] = 48,
+ [1][1][0][0][RTW89_FCC][7] = 36,
+ [1][1][0][0][RTW89_ETSI][7] = 48,
+ [1][1][0][0][RTW89_MKK][7] = 60,
+ [1][1][0][0][RTW89_IC][7] = 36,
+ [1][1][0][0][RTW89_ACMA][7] = 48,
+ [1][1][0][0][RTW89_FCC][8] = 36,
+ [1][1][0][0][RTW89_ETSI][8] = 48,
+ [1][1][0][0][RTW89_MKK][8] = 60,
+ [1][1][0][0][RTW89_IC][8] = 36,
+ [1][1][0][0][RTW89_ACMA][8] = 48,
+ [1][1][0][0][RTW89_FCC][9] = 32,
+ [1][1][0][0][RTW89_ETSI][9] = 48,
+ [1][1][0][0][RTW89_MKK][9] = 60,
+ [1][1][0][0][RTW89_IC][9] = 32,
+ [1][1][0][0][RTW89_ACMA][9] = 48,
+ [1][1][0][0][RTW89_FCC][10] = 32,
+ [1][1][0][0][RTW89_ETSI][10] = 48,
+ [1][1][0][0][RTW89_MKK][10] = 58,
+ [1][1][0][0][RTW89_IC][10] = 32,
+ [1][1][0][0][RTW89_ACMA][10] = 48,
+ [1][1][0][0][RTW89_FCC][11] = 127,
+ [1][1][0][0][RTW89_ETSI][11] = 127,
+ [1][1][0][0][RTW89_MKK][11] = 127,
+ [1][1][0][0][RTW89_IC][11] = 127,
+ [1][1][0][0][RTW89_ACMA][11] = 127,
+ [1][1][0][0][RTW89_FCC][12] = 127,
+ [1][1][0][0][RTW89_ETSI][12] = 127,
+ [1][1][0][0][RTW89_MKK][12] = 127,
+ [1][1][0][0][RTW89_IC][12] = 127,
+ [1][1][0][0][RTW89_ACMA][12] = 127,
+ [1][1][0][0][RTW89_FCC][13] = 127,
+ [1][1][0][0][RTW89_ETSI][13] = 127,
+ [1][1][0][0][RTW89_MKK][13] = 127,
+ [1][1][0][0][RTW89_IC][13] = 127,
+ [1][1][0][0][RTW89_ACMA][13] = 127,
+ [0][0][1][0][RTW89_FCC][0] = 78,
+ [0][0][1][0][RTW89_ETSI][0] = 60,
+ [0][0][1][0][RTW89_MKK][0] = 76,
+ [0][0][1][0][RTW89_IC][0] = 78,
+ [0][0][1][0][RTW89_ACMA][0] = 60,
+ [0][0][1][0][RTW89_FCC][1] = 78,
+ [0][0][1][0][RTW89_ETSI][1] = 60,
+ [0][0][1][0][RTW89_MKK][1] = 76,
+ [0][0][1][0][RTW89_IC][1] = 78,
+ [0][0][1][0][RTW89_ACMA][1] = 60,
+ [0][0][1][0][RTW89_FCC][2] = 80,
+ [0][0][1][0][RTW89_ETSI][2] = 60,
+ [0][0][1][0][RTW89_MKK][2] = 76,
+ [0][0][1][0][RTW89_IC][2] = 80,
+ [0][0][1][0][RTW89_ACMA][2] = 60,
+ [0][0][1][0][RTW89_FCC][3] = 80,
+ [0][0][1][0][RTW89_ETSI][3] = 60,
+ [0][0][1][0][RTW89_MKK][3] = 76,
+ [0][0][1][0][RTW89_IC][3] = 80,
+ [0][0][1][0][RTW89_ACMA][3] = 60,
+ [0][0][1][0][RTW89_FCC][4] = 80,
+ [0][0][1][0][RTW89_ETSI][4] = 60,
+ [0][0][1][0][RTW89_MKK][4] = 76,
+ [0][0][1][0][RTW89_IC][4] = 80,
+ [0][0][1][0][RTW89_ACMA][4] = 60,
+ [0][0][1][0][RTW89_FCC][5] = 80,
+ [0][0][1][0][RTW89_ETSI][5] = 60,
+ [0][0][1][0][RTW89_MKK][5] = 76,
+ [0][0][1][0][RTW89_IC][5] = 80,
+ [0][0][1][0][RTW89_ACMA][5] = 60,
+ [0][0][1][0][RTW89_FCC][6] = 80,
+ [0][0][1][0][RTW89_ETSI][6] = 60,
+ [0][0][1][0][RTW89_MKK][6] = 76,
+ [0][0][1][0][RTW89_IC][6] = 80,
+ [0][0][1][0][RTW89_ACMA][6] = 60,
+ [0][0][1][0][RTW89_FCC][7] = 80,
+ [0][0][1][0][RTW89_ETSI][7] = 60,
+ [0][0][1][0][RTW89_MKK][7] = 76,
+ [0][0][1][0][RTW89_IC][7] = 80,
+ [0][0][1][0][RTW89_ACMA][7] = 60,
+ [0][0][1][0][RTW89_FCC][8] = 80,
+ [0][0][1][0][RTW89_ETSI][8] = 60,
+ [0][0][1][0][RTW89_MKK][8] = 76,
+ [0][0][1][0][RTW89_IC][8] = 80,
+ [0][0][1][0][RTW89_ACMA][8] = 60,
+ [0][0][1][0][RTW89_FCC][9] = 76,
+ [0][0][1][0][RTW89_ETSI][9] = 60,
+ [0][0][1][0][RTW89_MKK][9] = 76,
+ [0][0][1][0][RTW89_IC][9] = 76,
+ [0][0][1][0][RTW89_ACMA][9] = 60,
+ [0][0][1][0][RTW89_FCC][10] = 76,
+ [0][0][1][0][RTW89_ETSI][10] = 60,
+ [0][0][1][0][RTW89_MKK][10] = 76,
+ [0][0][1][0][RTW89_IC][10] = 76,
+ [0][0][1][0][RTW89_ACMA][10] = 60,
+ [0][0][1][0][RTW89_FCC][11] = 56,
+ [0][0][1][0][RTW89_ETSI][11] = 60,
+ [0][0][1][0][RTW89_MKK][11] = 76,
+ [0][0][1][0][RTW89_IC][11] = 56,
+ [0][0][1][0][RTW89_ACMA][11] = 60,
+ [0][0][1][0][RTW89_FCC][12] = 52,
+ [0][0][1][0][RTW89_ETSI][12] = 60,
+ [0][0][1][0][RTW89_MKK][12] = 76,
+ [0][0][1][0][RTW89_IC][12] = 52,
+ [0][0][1][0][RTW89_ACMA][12] = 60,
+ [0][0][1][0][RTW89_FCC][13] = 127,
+ [0][0][1][0][RTW89_ETSI][13] = 127,
+ [0][0][1][0][RTW89_MKK][13] = 127,
+ [0][0][1][0][RTW89_IC][13] = 127,
+ [0][0][1][0][RTW89_ACMA][13] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 64,
+ [0][1][1][0][RTW89_ETSI][0] = 48,
+ [0][1][1][0][RTW89_MKK][0] = 68,
+ [0][1][1][0][RTW89_IC][0] = 64,
+ [0][1][1][0][RTW89_ACMA][0] = 48,
+ [0][1][1][0][RTW89_FCC][1] = 64,
+ [0][1][1][0][RTW89_ETSI][1] = 48,
+ [0][1][1][0][RTW89_MKK][1] = 68,
+ [0][1][1][0][RTW89_IC][1] = 64,
+ [0][1][1][0][RTW89_ACMA][1] = 48,
+ [0][1][1][0][RTW89_FCC][2] = 68,
+ [0][1][1][0][RTW89_ETSI][2] = 48,
+ [0][1][1][0][RTW89_MKK][2] = 68,
+ [0][1][1][0][RTW89_IC][2] = 68,
+ [0][1][1][0][RTW89_ACMA][2] = 48,
+ [0][1][1][0][RTW89_FCC][3] = 72,
+ [0][1][1][0][RTW89_ETSI][3] = 48,
+ [0][1][1][0][RTW89_MKK][3] = 68,
+ [0][1][1][0][RTW89_IC][3] = 72,
+ [0][1][1][0][RTW89_ACMA][3] = 48,
+ [0][1][1][0][RTW89_FCC][4] = 80,
+ [0][1][1][0][RTW89_ETSI][4] = 48,
+ [0][1][1][0][RTW89_MKK][4] = 68,
+ [0][1][1][0][RTW89_IC][4] = 80,
+ [0][1][1][0][RTW89_ACMA][4] = 48,
+ [0][1][1][0][RTW89_FCC][5] = 80,
+ [0][1][1][0][RTW89_ETSI][5] = 48,
+ [0][1][1][0][RTW89_MKK][5] = 68,
+ [0][1][1][0][RTW89_IC][5] = 80,
+ [0][1][1][0][RTW89_ACMA][5] = 48,
+ [0][1][1][0][RTW89_FCC][6] = 80,
+ [0][1][1][0][RTW89_ETSI][6] = 48,
+ [0][1][1][0][RTW89_MKK][6] = 68,
+ [0][1][1][0][RTW89_IC][6] = 80,
+ [0][1][1][0][RTW89_ACMA][6] = 48,
+ [0][1][1][0][RTW89_FCC][7] = 72,
+ [0][1][1][0][RTW89_ETSI][7] = 48,
+ [0][1][1][0][RTW89_MKK][7] = 68,
+ [0][1][1][0][RTW89_IC][7] = 72,
+ [0][1][1][0][RTW89_ACMA][7] = 48,
+ [0][1][1][0][RTW89_FCC][8] = 68,
+ [0][1][1][0][RTW89_ETSI][8] = 48,
+ [0][1][1][0][RTW89_MKK][8] = 68,
+ [0][1][1][0][RTW89_IC][8] = 68,
+ [0][1][1][0][RTW89_ACMA][8] = 48,
+ [0][1][1][0][RTW89_FCC][9] = 64,
+ [0][1][1][0][RTW89_ETSI][9] = 48,
+ [0][1][1][0][RTW89_MKK][9] = 68,
+ [0][1][1][0][RTW89_IC][9] = 64,
+ [0][1][1][0][RTW89_ACMA][9] = 48,
+ [0][1][1][0][RTW89_FCC][10] = 64,
+ [0][1][1][0][RTW89_ETSI][10] = 48,
+ [0][1][1][0][RTW89_MKK][10] = 68,
+ [0][1][1][0][RTW89_IC][10] = 64,
+ [0][1][1][0][RTW89_ACMA][10] = 48,
+ [0][1][1][0][RTW89_FCC][11] = 48,
+ [0][1][1][0][RTW89_ETSI][11] = 48,
+ [0][1][1][0][RTW89_MKK][11] = 68,
+ [0][1][1][0][RTW89_IC][11] = 48,
+ [0][1][1][0][RTW89_ACMA][11] = 48,
+ [0][1][1][0][RTW89_FCC][12] = 44,
+ [0][1][1][0][RTW89_ETSI][12] = 48,
+ [0][1][1][0][RTW89_MKK][12] = 68,
+ [0][1][1][0][RTW89_IC][12] = 44,
+ [0][1][1][0][RTW89_ACMA][12] = 48,
+ [0][1][1][0][RTW89_FCC][13] = 127,
+ [0][1][1][0][RTW89_ETSI][13] = 127,
+ [0][1][1][0][RTW89_MKK][13] = 127,
+ [0][1][1][0][RTW89_IC][13] = 127,
+ [0][1][1][0][RTW89_ACMA][13] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 78,
+ [0][0][2][0][RTW89_ETSI][0] = 60,
+ [0][0][2][0][RTW89_MKK][0] = 76,
+ [0][0][2][0][RTW89_IC][0] = 78,
+ [0][0][2][0][RTW89_ACMA][0] = 60,
+ [0][0][2][0][RTW89_FCC][1] = 78,
+ [0][0][2][0][RTW89_ETSI][1] = 60,
+ [0][0][2][0][RTW89_MKK][1] = 76,
+ [0][0][2][0][RTW89_IC][1] = 78,
+ [0][0][2][0][RTW89_ACMA][1] = 60,
+ [0][0][2][0][RTW89_FCC][2] = 80,
+ [0][0][2][0][RTW89_ETSI][2] = 60,
+ [0][0][2][0][RTW89_MKK][2] = 76,
+ [0][0][2][0][RTW89_IC][2] = 80,
+ [0][0][2][0][RTW89_ACMA][2] = 60,
+ [0][0][2][0][RTW89_FCC][3] = 80,
+ [0][0][2][0][RTW89_ETSI][3] = 60,
+ [0][0][2][0][RTW89_MKK][3] = 76,
+ [0][0][2][0][RTW89_IC][3] = 80,
+ [0][0][2][0][RTW89_ACMA][3] = 60,
+ [0][0][2][0][RTW89_FCC][4] = 80,
+ [0][0][2][0][RTW89_ETSI][4] = 60,
+ [0][0][2][0][RTW89_MKK][4] = 76,
+ [0][0][2][0][RTW89_IC][4] = 80,
+ [0][0][2][0][RTW89_ACMA][4] = 60,
+ [0][0][2][0][RTW89_FCC][5] = 80,
+ [0][0][2][0][RTW89_ETSI][5] = 60,
+ [0][0][2][0][RTW89_MKK][5] = 76,
+ [0][0][2][0][RTW89_IC][5] = 80,
+ [0][0][2][0][RTW89_ACMA][5] = 60,
+ [0][0][2][0][RTW89_FCC][6] = 80,
+ [0][0][2][0][RTW89_ETSI][6] = 60,
+ [0][0][2][0][RTW89_MKK][6] = 76,
+ [0][0][2][0][RTW89_IC][6] = 80,
+ [0][0][2][0][RTW89_ACMA][6] = 60,
+ [0][0][2][0][RTW89_FCC][7] = 80,
+ [0][0][2][0][RTW89_ETSI][7] = 60,
+ [0][0][2][0][RTW89_MKK][7] = 76,
+ [0][0][2][0][RTW89_IC][7] = 80,
+ [0][0][2][0][RTW89_ACMA][7] = 60,
+ [0][0][2][0][RTW89_FCC][8] = 78,
+ [0][0][2][0][RTW89_ETSI][8] = 60,
+ [0][0][2][0][RTW89_MKK][8] = 76,
+ [0][0][2][0][RTW89_IC][8] = 78,
+ [0][0][2][0][RTW89_ACMA][8] = 60,
+ [0][0][2][0][RTW89_FCC][9] = 74,
+ [0][0][2][0][RTW89_ETSI][9] = 60,
+ [0][0][2][0][RTW89_MKK][9] = 76,
+ [0][0][2][0][RTW89_IC][9] = 74,
+ [0][0][2][0][RTW89_ACMA][9] = 60,
+ [0][0][2][0][RTW89_FCC][10] = 74,
+ [0][0][2][0][RTW89_ETSI][10] = 60,
+ [0][0][2][0][RTW89_MKK][10] = 76,
+ [0][0][2][0][RTW89_IC][10] = 74,
+ [0][0][2][0][RTW89_ACMA][10] = 60,
+ [0][0][2][0][RTW89_FCC][11] = 56,
+ [0][0][2][0][RTW89_ETSI][11] = 60,
+ [0][0][2][0][RTW89_MKK][11] = 76,
+ [0][0][2][0][RTW89_IC][11] = 56,
+ [0][0][2][0][RTW89_ACMA][11] = 60,
+ [0][0][2][0][RTW89_FCC][12] = 52,
+ [0][0][2][0][RTW89_ETSI][12] = 60,
+ [0][0][2][0][RTW89_MKK][12] = 76,
+ [0][0][2][0][RTW89_IC][12] = 52,
+ [0][0][2][0][RTW89_ACMA][12] = 60,
+ [0][0][2][0][RTW89_FCC][13] = 127,
+ [0][0][2][0][RTW89_ETSI][13] = 127,
+ [0][0][2][0][RTW89_MKK][13] = 127,
+ [0][0][2][0][RTW89_IC][13] = 127,
+ [0][0][2][0][RTW89_ACMA][13] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 60,
+ [0][1][2][0][RTW89_ETSI][0] = 48,
+ [0][1][2][0][RTW89_MKK][0] = 70,
+ [0][1][2][0][RTW89_IC][0] = 60,
+ [0][1][2][0][RTW89_ACMA][0] = 48,
+ [0][1][2][0][RTW89_FCC][1] = 60,
+ [0][1][2][0][RTW89_ETSI][1] = 48,
+ [0][1][2][0][RTW89_MKK][1] = 70,
+ [0][1][2][0][RTW89_IC][1] = 60,
+ [0][1][2][0][RTW89_ACMA][1] = 48,
+ [0][1][2][0][RTW89_FCC][2] = 64,
+ [0][1][2][0][RTW89_ETSI][2] = 48,
+ [0][1][2][0][RTW89_MKK][2] = 70,
+ [0][1][2][0][RTW89_IC][2] = 64,
+ [0][1][2][0][RTW89_ACMA][2] = 48,
+ [0][1][2][0][RTW89_FCC][3] = 68,
+ [0][1][2][0][RTW89_ETSI][3] = 48,
+ [0][1][2][0][RTW89_MKK][3] = 70,
+ [0][1][2][0][RTW89_IC][3] = 68,
+ [0][1][2][0][RTW89_ACMA][3] = 48,
+ [0][1][2][0][RTW89_FCC][4] = 74,
+ [0][1][2][0][RTW89_ETSI][4] = 48,
+ [0][1][2][0][RTW89_MKK][4] = 70,
+ [0][1][2][0][RTW89_IC][4] = 74,
+ [0][1][2][0][RTW89_ACMA][4] = 48,
+ [0][1][2][0][RTW89_FCC][5] = 80,
+ [0][1][2][0][RTW89_ETSI][5] = 48,
+ [0][1][2][0][RTW89_MKK][5] = 70,
+ [0][1][2][0][RTW89_IC][5] = 80,
+ [0][1][2][0][RTW89_ACMA][5] = 48,
+ [0][1][2][0][RTW89_FCC][6] = 76,
+ [0][1][2][0][RTW89_ETSI][6] = 48,
+ [0][1][2][0][RTW89_MKK][6] = 70,
+ [0][1][2][0][RTW89_IC][6] = 76,
+ [0][1][2][0][RTW89_ACMA][6] = 48,
+ [0][1][2][0][RTW89_FCC][7] = 68,
+ [0][1][2][0][RTW89_ETSI][7] = 48,
+ [0][1][2][0][RTW89_MKK][7] = 70,
+ [0][1][2][0][RTW89_IC][7] = 68,
+ [0][1][2][0][RTW89_ACMA][7] = 48,
+ [0][1][2][0][RTW89_FCC][8] = 64,
+ [0][1][2][0][RTW89_ETSI][8] = 48,
+ [0][1][2][0][RTW89_MKK][8] = 70,
+ [0][1][2][0][RTW89_IC][8] = 64,
+ [0][1][2][0][RTW89_ACMA][8] = 48,
+ [0][1][2][0][RTW89_FCC][9] = 60,
+ [0][1][2][0][RTW89_ETSI][9] = 48,
+ [0][1][2][0][RTW89_MKK][9] = 70,
+ [0][1][2][0][RTW89_IC][9] = 60,
+ [0][1][2][0][RTW89_ACMA][9] = 48,
+ [0][1][2][0][RTW89_FCC][10] = 60,
+ [0][1][2][0][RTW89_ETSI][10] = 48,
+ [0][1][2][0][RTW89_MKK][10] = 70,
+ [0][1][2][0][RTW89_IC][10] = 60,
+ [0][1][2][0][RTW89_ACMA][10] = 48,
+ [0][1][2][0][RTW89_FCC][11] = 48,
+ [0][1][2][0][RTW89_ETSI][11] = 48,
+ [0][1][2][0][RTW89_MKK][11] = 70,
+ [0][1][2][0][RTW89_IC][11] = 48,
+ [0][1][2][0][RTW89_ACMA][11] = 48,
+ [0][1][2][0][RTW89_FCC][12] = 44,
+ [0][1][2][0][RTW89_ETSI][12] = 48,
+ [0][1][2][0][RTW89_MKK][12] = 70,
+ [0][1][2][0][RTW89_IC][12] = 44,
+ [0][1][2][0][RTW89_ACMA][12] = 48,
+ [0][1][2][0][RTW89_FCC][13] = 127,
+ [0][1][2][0][RTW89_ETSI][13] = 127,
+ [0][1][2][0][RTW89_MKK][13] = 127,
+ [0][1][2][0][RTW89_IC][13] = 127,
+ [0][1][2][0][RTW89_ACMA][13] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 60,
+ [0][1][2][1][RTW89_ETSI][0] = 38,
+ [0][1][2][1][RTW89_MKK][0] = 58,
+ [0][1][2][1][RTW89_IC][0] = 60,
+ [0][1][2][1][RTW89_ACMA][0] = 36,
+ [0][1][2][1][RTW89_FCC][1] = 60,
+ [0][1][2][1][RTW89_ETSI][1] = 38,
+ [0][1][2][1][RTW89_MKK][1] = 58,
+ [0][1][2][1][RTW89_IC][1] = 60,
+ [0][1][2][1][RTW89_ACMA][1] = 36,
+ [0][1][2][1][RTW89_FCC][2] = 64,
+ [0][1][2][1][RTW89_ETSI][2] = 38,
+ [0][1][2][1][RTW89_MKK][2] = 58,
+ [0][1][2][1][RTW89_IC][2] = 64,
+ [0][1][2][1][RTW89_ACMA][2] = 36,
+ [0][1][2][1][RTW89_FCC][3] = 68,
+ [0][1][2][1][RTW89_ETSI][3] = 38,
+ [0][1][2][1][RTW89_MKK][3] = 58,
+ [0][1][2][1][RTW89_IC][3] = 68,
+ [0][1][2][1][RTW89_ACMA][3] = 36,
+ [0][1][2][1][RTW89_FCC][4] = 74,
+ [0][1][2][1][RTW89_ETSI][4] = 38,
+ [0][1][2][1][RTW89_MKK][4] = 58,
+ [0][1][2][1][RTW89_IC][4] = 74,
+ [0][1][2][1][RTW89_ACMA][4] = 36,
+ [0][1][2][1][RTW89_FCC][5] = 80,
+ [0][1][2][1][RTW89_ETSI][5] = 38,
+ [0][1][2][1][RTW89_MKK][5] = 58,
+ [0][1][2][1][RTW89_IC][5] = 80,
+ [0][1][2][1][RTW89_ACMA][5] = 36,
+ [0][1][2][1][RTW89_FCC][6] = 76,
+ [0][1][2][1][RTW89_ETSI][6] = 38,
+ [0][1][2][1][RTW89_MKK][6] = 58,
+ [0][1][2][1][RTW89_IC][6] = 76,
+ [0][1][2][1][RTW89_ACMA][6] = 36,
+ [0][1][2][1][RTW89_FCC][7] = 68,
+ [0][1][2][1][RTW89_ETSI][7] = 38,
+ [0][1][2][1][RTW89_MKK][7] = 58,
+ [0][1][2][1][RTW89_IC][7] = 68,
+ [0][1][2][1][RTW89_ACMA][7] = 36,
+ [0][1][2][1][RTW89_FCC][8] = 64,
+ [0][1][2][1][RTW89_ETSI][8] = 38,
+ [0][1][2][1][RTW89_MKK][8] = 58,
+ [0][1][2][1][RTW89_IC][8] = 64,
+ [0][1][2][1][RTW89_ACMA][8] = 36,
+ [0][1][2][1][RTW89_FCC][9] = 60,
+ [0][1][2][1][RTW89_ETSI][9] = 38,
+ [0][1][2][1][RTW89_MKK][9] = 58,
+ [0][1][2][1][RTW89_IC][9] = 60,
+ [0][1][2][1][RTW89_ACMA][9] = 36,
+ [0][1][2][1][RTW89_FCC][10] = 60,
+ [0][1][2][1][RTW89_ETSI][10] = 38,
+ [0][1][2][1][RTW89_MKK][10] = 58,
+ [0][1][2][1][RTW89_IC][10] = 60,
+ [0][1][2][1][RTW89_ACMA][10] = 36,
+ [0][1][2][1][RTW89_FCC][11] = 48,
+ [0][1][2][1][RTW89_ETSI][11] = 38,
+ [0][1][2][1][RTW89_MKK][11] = 58,
+ [0][1][2][1][RTW89_IC][11] = 48,
+ [0][1][2][1][RTW89_ACMA][11] = 36,
+ [0][1][2][1][RTW89_FCC][12] = 44,
+ [0][1][2][1][RTW89_ETSI][12] = 38,
+ [0][1][2][1][RTW89_MKK][12] = 58,
+ [0][1][2][1][RTW89_IC][12] = 44,
+ [0][1][2][1][RTW89_ACMA][12] = 36,
+ [0][1][2][1][RTW89_FCC][13] = 127,
+ [0][1][2][1][RTW89_ETSI][13] = 127,
+ [0][1][2][1][RTW89_MKK][13] = 127,
+ [0][1][2][1][RTW89_IC][13] = 127,
+ [0][1][2][1][RTW89_ACMA][13] = 127,
+ [1][0][2][0][RTW89_FCC][0] = 127,
+ [1][0][2][0][RTW89_ETSI][0] = 127,
+ [1][0][2][0][RTW89_MKK][0] = 127,
+ [1][0][2][0][RTW89_IC][0] = 127,
+ [1][0][2][0][RTW89_ACMA][0] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 127,
+ [1][0][2][0][RTW89_ETSI][1] = 127,
+ [1][0][2][0][RTW89_MKK][1] = 127,
+ [1][0][2][0][RTW89_IC][1] = 127,
+ [1][0][2][0][RTW89_ACMA][1] = 127,
+ [1][0][2][0][RTW89_FCC][2] = 72,
+ [1][0][2][0][RTW89_ETSI][2] = 60,
+ [1][0][2][0][RTW89_MKK][2] = 72,
+ [1][0][2][0][RTW89_IC][2] = 72,
+ [1][0][2][0][RTW89_ACMA][2] = 60,
+ [1][0][2][0][RTW89_FCC][3] = 72,
+ [1][0][2][0][RTW89_ETSI][3] = 60,
+ [1][0][2][0][RTW89_MKK][3] = 72,
+ [1][0][2][0][RTW89_IC][3] = 72,
+ [1][0][2][0][RTW89_ACMA][3] = 60,
+ [1][0][2][0][RTW89_FCC][4] = 74,
+ [1][0][2][0][RTW89_ETSI][4] = 60,
+ [1][0][2][0][RTW89_MKK][4] = 72,
+ [1][0][2][0][RTW89_IC][4] = 74,
+ [1][0][2][0][RTW89_ACMA][4] = 60,
+ [1][0][2][0][RTW89_FCC][5] = 74,
+ [1][0][2][0][RTW89_ETSI][5] = 60,
+ [1][0][2][0][RTW89_MKK][5] = 72,
+ [1][0][2][0][RTW89_IC][5] = 74,
+ [1][0][2][0][RTW89_ACMA][5] = 60,
+ [1][0][2][0][RTW89_FCC][6] = 74,
+ [1][0][2][0][RTW89_ETSI][6] = 60,
+ [1][0][2][0][RTW89_MKK][6] = 72,
+ [1][0][2][0][RTW89_IC][6] = 74,
+ [1][0][2][0][RTW89_ACMA][6] = 60,
+ [1][0][2][0][RTW89_FCC][7] = 70,
+ [1][0][2][0][RTW89_ETSI][7] = 60,
+ [1][0][2][0][RTW89_MKK][7] = 72,
+ [1][0][2][0][RTW89_IC][7] = 70,
+ [1][0][2][0][RTW89_ACMA][7] = 60,
+ [1][0][2][0][RTW89_FCC][8] = 70,
+ [1][0][2][0][RTW89_ETSI][8] = 60,
+ [1][0][2][0][RTW89_MKK][8] = 72,
+ [1][0][2][0][RTW89_IC][8] = 70,
+ [1][0][2][0][RTW89_ACMA][8] = 60,
+ [1][0][2][0][RTW89_FCC][9] = 70,
+ [1][0][2][0][RTW89_ETSI][9] = 60,
+ [1][0][2][0][RTW89_MKK][9] = 72,
+ [1][0][2][0][RTW89_IC][9] = 70,
+ [1][0][2][0][RTW89_ACMA][9] = 60,
+ [1][0][2][0][RTW89_FCC][10] = 68,
+ [1][0][2][0][RTW89_ETSI][10] = 60,
+ [1][0][2][0][RTW89_MKK][10] = 72,
+ [1][0][2][0][RTW89_IC][10] = 68,
+ [1][0][2][0][RTW89_ACMA][10] = 60,
+ [1][0][2][0][RTW89_FCC][11] = 127,
+ [1][0][2][0][RTW89_ETSI][11] = 127,
+ [1][0][2][0][RTW89_MKK][11] = 127,
+ [1][0][2][0][RTW89_IC][11] = 127,
+ [1][0][2][0][RTW89_ACMA][11] = 127,
+ [1][0][2][0][RTW89_FCC][12] = 127,
+ [1][0][2][0][RTW89_ETSI][12] = 127,
+ [1][0][2][0][RTW89_MKK][12] = 127,
+ [1][0][2][0][RTW89_IC][12] = 127,
+ [1][0][2][0][RTW89_ACMA][12] = 127,
+ [1][0][2][0][RTW89_FCC][13] = 127,
+ [1][0][2][0][RTW89_ETSI][13] = 127,
+ [1][0][2][0][RTW89_MKK][13] = 127,
+ [1][0][2][0][RTW89_IC][13] = 127,
+ [1][0][2][0][RTW89_ACMA][13] = 127,
+ [1][1][2][0][RTW89_FCC][0] = 127,
+ [1][1][2][0][RTW89_ETSI][0] = 127,
+ [1][1][2][0][RTW89_MKK][0] = 127,
+ [1][1][2][0][RTW89_IC][0] = 127,
+ [1][1][2][0][RTW89_ACMA][0] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 127,
+ [1][1][2][0][RTW89_ETSI][1] = 127,
+ [1][1][2][0][RTW89_MKK][1] = 127,
+ [1][1][2][0][RTW89_IC][1] = 127,
+ [1][1][2][0][RTW89_ACMA][1] = 127,
+ [1][1][2][0][RTW89_FCC][2] = 56,
+ [1][1][2][0][RTW89_ETSI][2] = 48,
+ [1][1][2][0][RTW89_MKK][2] = 70,
+ [1][1][2][0][RTW89_IC][2] = 56,
+ [1][1][2][0][RTW89_ACMA][2] = 48,
+ [1][1][2][0][RTW89_FCC][3] = 56,
+ [1][1][2][0][RTW89_ETSI][3] = 48,
+ [1][1][2][0][RTW89_MKK][3] = 70,
+ [1][1][2][0][RTW89_IC][3] = 56,
+ [1][1][2][0][RTW89_ACMA][3] = 48,
+ [1][1][2][0][RTW89_FCC][4] = 60,
+ [1][1][2][0][RTW89_ETSI][4] = 48,
+ [1][1][2][0][RTW89_MKK][4] = 70,
+ [1][1][2][0][RTW89_IC][4] = 60,
+ [1][1][2][0][RTW89_ACMA][4] = 48,
+ [1][1][2][0][RTW89_FCC][5] = 68,
+ [1][1][2][0][RTW89_ETSI][5] = 48,
+ [1][1][2][0][RTW89_MKK][5] = 70,
+ [1][1][2][0][RTW89_IC][5] = 68,
+ [1][1][2][0][RTW89_ACMA][5] = 48,
+ [1][1][2][0][RTW89_FCC][6] = 60,
+ [1][1][2][0][RTW89_ETSI][6] = 48,
+ [1][1][2][0][RTW89_MKK][6] = 70,
+ [1][1][2][0][RTW89_IC][6] = 60,
+ [1][1][2][0][RTW89_ACMA][6] = 48,
+ [1][1][2][0][RTW89_FCC][7] = 56,
+ [1][1][2][0][RTW89_ETSI][7] = 48,
+ [1][1][2][0][RTW89_MKK][7] = 70,
+ [1][1][2][0][RTW89_IC][7] = 56,
+ [1][1][2][0][RTW89_ACMA][7] = 48,
+ [1][1][2][0][RTW89_FCC][8] = 56,
+ [1][1][2][0][RTW89_ETSI][8] = 48,
+ [1][1][2][0][RTW89_MKK][8] = 70,
+ [1][1][2][0][RTW89_IC][8] = 56,
+ [1][1][2][0][RTW89_ACMA][8] = 48,
+ [1][1][2][0][RTW89_FCC][9] = 44,
+ [1][1][2][0][RTW89_ETSI][9] = 48,
+ [1][1][2][0][RTW89_MKK][9] = 70,
+ [1][1][2][0][RTW89_IC][9] = 44,
+ [1][1][2][0][RTW89_ACMA][9] = 48,
+ [1][1][2][0][RTW89_FCC][10] = 40,
+ [1][1][2][0][RTW89_ETSI][10] = 48,
+ [1][1][2][0][RTW89_MKK][10] = 70,
+ [1][1][2][0][RTW89_IC][10] = 40,
+ [1][1][2][0][RTW89_ACMA][10] = 48,
+ [1][1][2][0][RTW89_FCC][11] = 127,
+ [1][1][2][0][RTW89_ETSI][11] = 127,
+ [1][1][2][0][RTW89_MKK][11] = 127,
+ [1][1][2][0][RTW89_IC][11] = 127,
+ [1][1][2][0][RTW89_ACMA][11] = 127,
+ [1][1][2][0][RTW89_FCC][12] = 127,
+ [1][1][2][0][RTW89_ETSI][12] = 127,
+ [1][1][2][0][RTW89_MKK][12] = 127,
+ [1][1][2][0][RTW89_IC][12] = 127,
+ [1][1][2][0][RTW89_ACMA][12] = 127,
+ [1][1][2][0][RTW89_FCC][13] = 127,
+ [1][1][2][0][RTW89_ETSI][13] = 127,
+ [1][1][2][0][RTW89_MKK][13] = 127,
+ [1][1][2][0][RTW89_IC][13] = 127,
+ [1][1][2][0][RTW89_ACMA][13] = 127,
+ [1][1][2][1][RTW89_FCC][0] = 127,
+ [1][1][2][1][RTW89_ETSI][0] = 127,
+ [1][1][2][1][RTW89_MKK][0] = 127,
+ [1][1][2][1][RTW89_IC][0] = 127,
+ [1][1][2][1][RTW89_ACMA][0] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 127,
+ [1][1][2][1][RTW89_ETSI][1] = 127,
+ [1][1][2][1][RTW89_MKK][1] = 127,
+ [1][1][2][1][RTW89_IC][1] = 127,
+ [1][1][2][1][RTW89_ACMA][1] = 127,
+ [1][1][2][1][RTW89_FCC][2] = 56,
+ [1][1][2][1][RTW89_ETSI][2] = 38,
+ [1][1][2][1][RTW89_MKK][2] = 58,
+ [1][1][2][1][RTW89_IC][2] = 56,
+ [1][1][2][1][RTW89_ACMA][2] = 36,
+ [1][1][2][1][RTW89_FCC][3] = 56,
+ [1][1][2][1][RTW89_ETSI][3] = 38,
+ [1][1][2][1][RTW89_MKK][3] = 58,
+ [1][1][2][1][RTW89_IC][3] = 56,
+ [1][1][2][1][RTW89_ACMA][3] = 36,
+ [1][1][2][1][RTW89_FCC][4] = 60,
+ [1][1][2][1][RTW89_ETSI][4] = 38,
+ [1][1][2][1][RTW89_MKK][4] = 58,
+ [1][1][2][1][RTW89_IC][4] = 60,
+ [1][1][2][1][RTW89_ACMA][4] = 36,
+ [1][1][2][1][RTW89_FCC][5] = 68,
+ [1][1][2][1][RTW89_ETSI][5] = 38,
+ [1][1][2][1][RTW89_MKK][5] = 58,
+ [1][1][2][1][RTW89_IC][5] = 68,
+ [1][1][2][1][RTW89_ACMA][5] = 36,
+ [1][1][2][1][RTW89_FCC][6] = 60,
+ [1][1][2][1][RTW89_ETSI][6] = 38,
+ [1][1][2][1][RTW89_MKK][6] = 58,
+ [1][1][2][1][RTW89_IC][6] = 60,
+ [1][1][2][1][RTW89_ACMA][6] = 36,
+ [1][1][2][1][RTW89_FCC][7] = 56,
+ [1][1][2][1][RTW89_ETSI][7] = 38,
+ [1][1][2][1][RTW89_MKK][7] = 58,
+ [1][1][2][1][RTW89_IC][7] = 56,
+ [1][1][2][1][RTW89_ACMA][7] = 36,
+ [1][1][2][1][RTW89_FCC][8] = 56,
+ [1][1][2][1][RTW89_ETSI][8] = 38,
+ [1][1][2][1][RTW89_MKK][8] = 58,
+ [1][1][2][1][RTW89_IC][8] = 56,
+ [1][1][2][1][RTW89_ACMA][8] = 36,
+ [1][1][2][1][RTW89_FCC][9] = 44,
+ [1][1][2][1][RTW89_ETSI][9] = 38,
+ [1][1][2][1][RTW89_MKK][9] = 58,
+ [1][1][2][1][RTW89_IC][9] = 44,
+ [1][1][2][1][RTW89_ACMA][9] = 36,
+ [1][1][2][1][RTW89_FCC][10] = 40,
+ [1][1][2][1][RTW89_ETSI][10] = 38,
+ [1][1][2][1][RTW89_MKK][10] = 58,
+ [1][1][2][1][RTW89_IC][10] = 40,
+ [1][1][2][1][RTW89_ACMA][10] = 36,
+ [1][1][2][1][RTW89_FCC][11] = 127,
+ [1][1][2][1][RTW89_ETSI][11] = 127,
+ [1][1][2][1][RTW89_MKK][11] = 127,
+ [1][1][2][1][RTW89_IC][11] = 127,
+ [1][1][2][1][RTW89_ACMA][11] = 127,
+ [1][1][2][1][RTW89_FCC][12] = 127,
+ [1][1][2][1][RTW89_ETSI][12] = 127,
+ [1][1][2][1][RTW89_MKK][12] = 127,
+ [1][1][2][1][RTW89_IC][12] = 127,
+ [1][1][2][1][RTW89_ACMA][12] = 127,
+ [1][1][2][1][RTW89_FCC][13] = 127,
+ [1][1][2][1][RTW89_ETSI][13] = 127,
+ [1][1][2][1][RTW89_MKK][13] = 127,
+ [1][1][2][1][RTW89_IC][13] = 127,
+ [1][1][2][1][RTW89_ACMA][13] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][1][0][RTW89_WW][0] = 60,
+ [0][0][1][0][RTW89_WW][2] = 60,
+ [0][0][1][0][RTW89_WW][4] = 60,
+ [0][0][1][0][RTW89_WW][6] = 60,
+ [0][0][1][0][RTW89_WW][8] = 60,
+ [0][0][1][0][RTW89_WW][10] = 60,
+ [0][0][1][0][RTW89_WW][12] = 60,
+ [0][0][1][0][RTW89_WW][14] = 60,
+ [0][0][1][0][RTW89_WW][15] = 60,
+ [0][0][1][0][RTW89_WW][17] = 60,
+ [0][0][1][0][RTW89_WW][19] = 60,
+ [0][0][1][0][RTW89_WW][21] = 60,
+ [0][0][1][0][RTW89_WW][23] = 60,
+ [0][0][1][0][RTW89_WW][25] = 60,
+ [0][0][1][0][RTW89_WW][27] = 60,
+ [0][0][1][0][RTW89_WW][29] = 60,
+ [0][0][1][0][RTW89_WW][31] = 60,
+ [0][0][1][0][RTW89_WW][33] = 60,
+ [0][0][1][0][RTW89_WW][35] = 60,
+ [0][0][1][0][RTW89_WW][37] = 78,
+ [0][0][1][0][RTW89_WW][38] = 30,
+ [0][0][1][0][RTW89_WW][40] = 30,
+ [0][0][1][0][RTW89_WW][42] = 30,
+ [0][0][1][0][RTW89_WW][44] = 30,
+ [0][0][1][0][RTW89_WW][46] = 30,
+ [0][0][1][0][RTW89_WW][48] = 80,
+ [0][0][1][0][RTW89_WW][50] = 80,
+ [0][0][1][0][RTW89_WW][52] = 80,
+ [0][1][1][0][RTW89_WW][0] = 42,
+ [0][1][1][0][RTW89_WW][2] = 42,
+ [0][1][1][0][RTW89_WW][4] = 42,
+ [0][1][1][0][RTW89_WW][6] = 42,
+ [0][1][1][0][RTW89_WW][8] = 48,
+ [0][1][1][0][RTW89_WW][10] = 48,
+ [0][1][1][0][RTW89_WW][12] = 48,
+ [0][1][1][0][RTW89_WW][14] = 48,
+ [0][1][1][0][RTW89_WW][15] = 48,
+ [0][1][1][0][RTW89_WW][17] = 48,
+ [0][1][1][0][RTW89_WW][19] = 48,
+ [0][1][1][0][RTW89_WW][21] = 48,
+ [0][1][1][0][RTW89_WW][23] = 48,
+ [0][1][1][0][RTW89_WW][25] = 48,
+ [0][1][1][0][RTW89_WW][27] = 48,
+ [0][1][1][0][RTW89_WW][29] = 48,
+ [0][1][1][0][RTW89_WW][31] = 48,
+ [0][1][1][0][RTW89_WW][33] = 48,
+ [0][1][1][0][RTW89_WW][35] = 48,
+ [0][1][1][0][RTW89_WW][37] = 70,
+ [0][1][1][0][RTW89_WW][38] = 18,
+ [0][1][1][0][RTW89_WW][40] = 16,
+ [0][1][1][0][RTW89_WW][42] = 18,
+ [0][1][1][0][RTW89_WW][44] = 16,
+ [0][1][1][0][RTW89_WW][46] = 18,
+ [0][1][1][0][RTW89_WW][48] = 58,
+ [0][1][1][0][RTW89_WW][50] = 58,
+ [0][1][1][0][RTW89_WW][52] = 58,
+ [0][0][2][0][RTW89_WW][0] = 62,
+ [0][0][2][0][RTW89_WW][2] = 62,
+ [0][0][2][0][RTW89_WW][4] = 62,
+ [0][0][2][0][RTW89_WW][6] = 62,
+ [0][0][2][0][RTW89_WW][8] = 62,
+ [0][0][2][0][RTW89_WW][10] = 62,
+ [0][0][2][0][RTW89_WW][12] = 62,
+ [0][0][2][0][RTW89_WW][14] = 62,
+ [0][0][2][0][RTW89_WW][15] = 62,
+ [0][0][2][0][RTW89_WW][17] = 62,
+ [0][0][2][0][RTW89_WW][19] = 62,
+ [0][0][2][0][RTW89_WW][21] = 62,
+ [0][0][2][0][RTW89_WW][23] = 62,
+ [0][0][2][0][RTW89_WW][25] = 62,
+ [0][0][2][0][RTW89_WW][27] = 62,
+ [0][0][2][0][RTW89_WW][29] = 62,
+ [0][0][2][0][RTW89_WW][31] = 62,
+ [0][0][2][0][RTW89_WW][33] = 62,
+ [0][0][2][0][RTW89_WW][35] = 62,
+ [0][0][2][0][RTW89_WW][37] = 78,
+ [0][0][2][0][RTW89_WW][38] = 30,
+ [0][0][2][0][RTW89_WW][40] = 30,
+ [0][0][2][0][RTW89_WW][42] = 30,
+ [0][0][2][0][RTW89_WW][44] = 30,
+ [0][0][2][0][RTW89_WW][46] = 30,
+ [0][0][2][0][RTW89_WW][48] = 80,
+ [0][0][2][0][RTW89_WW][50] = 80,
+ [0][0][2][0][RTW89_WW][52] = 80,
+ [0][1][2][0][RTW89_WW][0] = 44,
+ [0][1][2][0][RTW89_WW][2] = 44,
+ [0][1][2][0][RTW89_WW][4] = 44,
+ [0][1][2][0][RTW89_WW][6] = 44,
+ [0][1][2][0][RTW89_WW][8] = 50,
+ [0][1][2][0][RTW89_WW][10] = 50,
+ [0][1][2][0][RTW89_WW][12] = 50,
+ [0][1][2][0][RTW89_WW][14] = 50,
+ [0][1][2][0][RTW89_WW][15] = 50,
+ [0][1][2][0][RTW89_WW][17] = 50,
+ [0][1][2][0][RTW89_WW][19] = 50,
+ [0][1][2][0][RTW89_WW][21] = 50,
+ [0][1][2][0][RTW89_WW][23] = 50,
+ [0][1][2][0][RTW89_WW][25] = 50,
+ [0][1][2][0][RTW89_WW][27] = 50,
+ [0][1][2][0][RTW89_WW][29] = 50,
+ [0][1][2][0][RTW89_WW][31] = 50,
+ [0][1][2][0][RTW89_WW][33] = 50,
+ [0][1][2][0][RTW89_WW][35] = 50,
+ [0][1][2][0][RTW89_WW][37] = 72,
+ [0][1][2][0][RTW89_WW][38] = 18,
+ [0][1][2][0][RTW89_WW][40] = 18,
+ [0][1][2][0][RTW89_WW][42] = 18,
+ [0][1][2][0][RTW89_WW][44] = 18,
+ [0][1][2][0][RTW89_WW][46] = 18,
+ [0][1][2][0][RTW89_WW][48] = 60,
+ [0][1][2][0][RTW89_WW][50] = 60,
+ [0][1][2][0][RTW89_WW][52] = 60,
+ [0][1][2][1][RTW89_WW][0] = 38,
+ [0][1][2][1][RTW89_WW][2] = 38,
+ [0][1][2][1][RTW89_WW][4] = 38,
+ [0][1][2][1][RTW89_WW][6] = 38,
+ [0][1][2][1][RTW89_WW][8] = 38,
+ [0][1][2][1][RTW89_WW][10] = 38,
+ [0][1][2][1][RTW89_WW][12] = 38,
+ [0][1][2][1][RTW89_WW][14] = 38,
+ [0][1][2][1][RTW89_WW][15] = 38,
+ [0][1][2][1][RTW89_WW][17] = 38,
+ [0][1][2][1][RTW89_WW][19] = 38,
+ [0][1][2][1][RTW89_WW][21] = 38,
+ [0][1][2][1][RTW89_WW][23] = 38,
+ [0][1][2][1][RTW89_WW][25] = 42,
+ [0][1][2][1][RTW89_WW][27] = 42,
+ [0][1][2][1][RTW89_WW][29] = 42,
+ [0][1][2][1][RTW89_WW][31] = 38,
+ [0][1][2][1][RTW89_WW][33] = 38,
+ [0][1][2][1][RTW89_WW][35] = 38,
+ [0][1][2][1][RTW89_WW][37] = 70,
+ [0][1][2][1][RTW89_WW][38] = 8,
+ [0][1][2][1][RTW89_WW][40] = 8,
+ [0][1][2][1][RTW89_WW][42] = 8,
+ [0][1][2][1][RTW89_WW][44] = 8,
+ [0][1][2][1][RTW89_WW][46] = 8,
+ [0][1][2][1][RTW89_WW][48] = 60,
+ [0][1][2][1][RTW89_WW][50] = 60,
+ [0][1][2][1][RTW89_WW][52] = 60,
+ [1][0][2][0][RTW89_WW][1] = 66,
+ [1][0][2][0][RTW89_WW][5] = 66,
+ [1][0][2][0][RTW89_WW][9] = 66,
+ [1][0][2][0][RTW89_WW][13] = 66,
+ [1][0][2][0][RTW89_WW][16] = 66,
+ [1][0][2][0][RTW89_WW][20] = 66,
+ [1][0][2][0][RTW89_WW][24] = 66,
+ [1][0][2][0][RTW89_WW][28] = 66,
+ [1][0][2][0][RTW89_WW][32] = 66,
+ [1][0][2][0][RTW89_WW][36] = 76,
+ [1][0][2][0][RTW89_WW][39] = 30,
+ [1][0][2][0][RTW89_WW][43] = 30,
+ [1][0][2][0][RTW89_WW][47] = 80,
+ [1][0][2][0][RTW89_WW][51] = 72,
+ [1][1][2][0][RTW89_WW][1] = 54,
+ [1][1][2][0][RTW89_WW][5] = 54,
+ [1][1][2][0][RTW89_WW][9] = 54,
+ [1][1][2][0][RTW89_WW][13] = 54,
+ [1][1][2][0][RTW89_WW][16] = 54,
+ [1][1][2][0][RTW89_WW][20] = 54,
+ [1][1][2][0][RTW89_WW][24] = 54,
+ [1][1][2][0][RTW89_WW][28] = 54,
+ [1][1][2][0][RTW89_WW][32] = 54,
+ [1][1][2][0][RTW89_WW][36] = 72,
+ [1][1][2][0][RTW89_WW][39] = 18,
+ [1][1][2][0][RTW89_WW][43] = 18,
+ [1][1][2][0][RTW89_WW][47] = 70,
+ [1][1][2][0][RTW89_WW][51] = 68,
+ [1][1][2][1][RTW89_WW][1] = 42,
+ [1][1][2][1][RTW89_WW][5] = 42,
+ [1][1][2][1][RTW89_WW][9] = 42,
+ [1][1][2][1][RTW89_WW][13] = 42,
+ [1][1][2][1][RTW89_WW][16] = 42,
+ [1][1][2][1][RTW89_WW][20] = 42,
+ [1][1][2][1][RTW89_WW][24] = 42,
+ [1][1][2][1][RTW89_WW][28] = 42,
+ [1][1][2][1][RTW89_WW][32] = 42,
+ [1][1][2][1][RTW89_WW][36] = 70,
+ [1][1][2][1][RTW89_WW][39] = 8,
+ [1][1][2][1][RTW89_WW][43] = 8,
+ [1][1][2][1][RTW89_WW][47] = 70,
+ [1][1][2][1][RTW89_WW][51] = 68,
+ [2][0][2][0][RTW89_WW][3] = 64,
+ [2][0][2][0][RTW89_WW][11] = 66,
+ [2][0][2][0][RTW89_WW][18] = 64,
+ [2][0][2][0][RTW89_WW][26] = 66,
+ [2][0][2][0][RTW89_WW][34] = 72,
+ [2][0][2][0][RTW89_WW][41] = 30,
+ [2][0][2][0][RTW89_WW][49] = 66,
+ [2][1][2][0][RTW89_WW][3] = 54,
+ [2][1][2][0][RTW89_WW][11] = 54,
+ [2][1][2][0][RTW89_WW][18] = 54,
+ [2][1][2][0][RTW89_WW][26] = 54,
+ [2][1][2][0][RTW89_WW][34] = 72,
+ [2][1][2][0][RTW89_WW][41] = 18,
+ [2][1][2][0][RTW89_WW][49] = 60,
+ [2][1][2][1][RTW89_WW][3] = 42,
+ [2][1][2][1][RTW89_WW][11] = 42,
+ [2][1][2][1][RTW89_WW][18] = 42,
+ [2][1][2][1][RTW89_WW][26] = 44,
+ [2][1][2][1][RTW89_WW][34] = 70,
+ [2][1][2][1][RTW89_WW][41] = 8,
+ [2][1][2][1][RTW89_WW][49] = 60,
+ [3][0][2][0][RTW89_WW][7] = 56,
+ [3][0][2][0][RTW89_WW][22] = 56,
+ [3][0][2][0][RTW89_WW][45] = 56,
+ [3][1][2][0][RTW89_WW][7] = 44,
+ [3][1][2][0][RTW89_WW][22] = 44,
+ [3][1][2][0][RTW89_WW][45] = 44,
+ [3][1][2][1][RTW89_WW][7] = 32,
+ [3][1][2][1][RTW89_WW][22] = 32,
+ [3][1][2][1][RTW89_WW][45] = 32,
+ [0][0][1][0][RTW89_FCC][0] = 80,
+ [0][0][1][0][RTW89_ETSI][0] = 60,
+ [0][0][1][0][RTW89_MKK][0] = 62,
+ [0][0][1][0][RTW89_IC][0] = 62,
+ [0][0][1][0][RTW89_ACMA][0] = 60,
+ [0][0][1][0][RTW89_FCC][2] = 80,
+ [0][0][1][0][RTW89_ETSI][2] = 60,
+ [0][0][1][0][RTW89_MKK][2] = 62,
+ [0][0][1][0][RTW89_IC][2] = 62,
+ [0][0][1][0][RTW89_ACMA][2] = 60,
+ [0][0][1][0][RTW89_FCC][4] = 80,
+ [0][0][1][0][RTW89_ETSI][4] = 60,
+ [0][0][1][0][RTW89_MKK][4] = 62,
+ [0][0][1][0][RTW89_IC][4] = 62,
+ [0][0][1][0][RTW89_ACMA][4] = 60,
+ [0][0][1][0][RTW89_FCC][6] = 80,
+ [0][0][1][0][RTW89_ETSI][6] = 60,
+ [0][0][1][0][RTW89_MKK][6] = 62,
+ [0][0][1][0][RTW89_IC][6] = 62,
+ [0][0][1][0][RTW89_ACMA][6] = 60,
+ [0][0][1][0][RTW89_FCC][8] = 80,
+ [0][0][1][0][RTW89_ETSI][8] = 60,
+ [0][0][1][0][RTW89_MKK][8] = 64,
+ [0][0][1][0][RTW89_IC][8] = 66,
+ [0][0][1][0][RTW89_ACMA][8] = 60,
+ [0][0][1][0][RTW89_FCC][10] = 80,
+ [0][0][1][0][RTW89_ETSI][10] = 60,
+ [0][0][1][0][RTW89_MKK][10] = 64,
+ [0][0][1][0][RTW89_IC][10] = 66,
+ [0][0][1][0][RTW89_ACMA][10] = 60,
+ [0][0][1][0][RTW89_FCC][12] = 80,
+ [0][0][1][0][RTW89_ETSI][12] = 60,
+ [0][0][1][0][RTW89_MKK][12] = 64,
+ [0][0][1][0][RTW89_IC][12] = 66,
+ [0][0][1][0][RTW89_ACMA][12] = 60,
+ [0][0][1][0][RTW89_FCC][14] = 80,
+ [0][0][1][0][RTW89_ETSI][14] = 60,
+ [0][0][1][0][RTW89_MKK][14] = 62,
+ [0][0][1][0][RTW89_IC][14] = 66,
+ [0][0][1][0][RTW89_ACMA][14] = 60,
+ [0][0][1][0][RTW89_FCC][15] = 78,
+ [0][0][1][0][RTW89_ETSI][15] = 60,
+ [0][0][1][0][RTW89_MKK][15] = 78,
+ [0][0][1][0][RTW89_IC][15] = 78,
+ [0][0][1][0][RTW89_ACMA][15] = 60,
+ [0][0][1][0][RTW89_FCC][17] = 80,
+ [0][0][1][0][RTW89_ETSI][17] = 60,
+ [0][0][1][0][RTW89_MKK][17] = 78,
+ [0][0][1][0][RTW89_IC][17] = 80,
+ [0][0][1][0][RTW89_ACMA][17] = 60,
+ [0][0][1][0][RTW89_FCC][19] = 80,
+ [0][0][1][0][RTW89_ETSI][19] = 60,
+ [0][0][1][0][RTW89_MKK][19] = 78,
+ [0][0][1][0][RTW89_IC][19] = 80,
+ [0][0][1][0][RTW89_ACMA][19] = 60,
+ [0][0][1][0][RTW89_FCC][21] = 80,
+ [0][0][1][0][RTW89_ETSI][21] = 60,
+ [0][0][1][0][RTW89_MKK][21] = 78,
+ [0][0][1][0][RTW89_IC][21] = 80,
+ [0][0][1][0][RTW89_ACMA][21] = 60,
+ [0][0][1][0][RTW89_FCC][23] = 80,
+ [0][0][1][0][RTW89_ETSI][23] = 60,
+ [0][0][1][0][RTW89_MKK][23] = 78,
+ [0][0][1][0][RTW89_IC][23] = 80,
+ [0][0][1][0][RTW89_ACMA][23] = 60,
+ [0][0][1][0][RTW89_FCC][25] = 80,
+ [0][0][1][0][RTW89_ETSI][25] = 60,
+ [0][0][1][0][RTW89_MKK][25] = 78,
+ [0][0][1][0][RTW89_IC][25] = 127,
+ [0][0][1][0][RTW89_ACMA][25] = 127,
+ [0][0][1][0][RTW89_FCC][27] = 80,
+ [0][0][1][0][RTW89_ETSI][27] = 60,
+ [0][0][1][0][RTW89_MKK][27] = 78,
+ [0][0][1][0][RTW89_IC][27] = 127,
+ [0][0][1][0][RTW89_ACMA][27] = 127,
+ [0][0][1][0][RTW89_FCC][29] = 80,
+ [0][0][1][0][RTW89_ETSI][29] = 60,
+ [0][0][1][0][RTW89_MKK][29] = 78,
+ [0][0][1][0][RTW89_IC][29] = 127,
+ [0][0][1][0][RTW89_ACMA][29] = 127,
+ [0][0][1][0][RTW89_FCC][31] = 80,
+ [0][0][1][0][RTW89_ETSI][31] = 60,
+ [0][0][1][0][RTW89_MKK][31] = 78,
+ [0][0][1][0][RTW89_IC][31] = 80,
+ [0][0][1][0][RTW89_ACMA][31] = 60,
+ [0][0][1][0][RTW89_FCC][33] = 80,
+ [0][0][1][0][RTW89_ETSI][33] = 60,
+ [0][0][1][0][RTW89_MKK][33] = 78,
+ [0][0][1][0][RTW89_IC][33] = 80,
+ [0][0][1][0][RTW89_ACMA][33] = 60,
+ [0][0][1][0][RTW89_FCC][35] = 72,
+ [0][0][1][0][RTW89_ETSI][35] = 60,
+ [0][0][1][0][RTW89_MKK][35] = 78,
+ [0][0][1][0][RTW89_IC][35] = 72,
+ [0][0][1][0][RTW89_ACMA][35] = 60,
+ [0][0][1][0][RTW89_FCC][37] = 80,
+ [0][0][1][0][RTW89_ETSI][37] = 127,
+ [0][0][1][0][RTW89_MKK][37] = 78,
+ [0][0][1][0][RTW89_IC][37] = 80,
+ [0][0][1][0][RTW89_ACMA][37] = 78,
+ [0][0][1][0][RTW89_FCC][38] = 80,
+ [0][0][1][0][RTW89_ETSI][38] = 30,
+ [0][0][1][0][RTW89_MKK][38] = 127,
+ [0][0][1][0][RTW89_IC][38] = 80,
+ [0][0][1][0][RTW89_ACMA][38] = 78,
+ [0][0][1][0][RTW89_FCC][40] = 80,
+ [0][0][1][0][RTW89_ETSI][40] = 30,
+ [0][0][1][0][RTW89_MKK][40] = 127,
+ [0][0][1][0][RTW89_IC][40] = 80,
+ [0][0][1][0][RTW89_ACMA][40] = 78,
+ [0][0][1][0][RTW89_FCC][42] = 80,
+ [0][0][1][0][RTW89_ETSI][42] = 30,
+ [0][0][1][0][RTW89_MKK][42] = 127,
+ [0][0][1][0][RTW89_IC][42] = 80,
+ [0][0][1][0][RTW89_ACMA][42] = 78,
+ [0][0][1][0][RTW89_FCC][44] = 80,
+ [0][0][1][0][RTW89_ETSI][44] = 30,
+ [0][0][1][0][RTW89_MKK][44] = 127,
+ [0][0][1][0][RTW89_IC][44] = 80,
+ [0][0][1][0][RTW89_ACMA][44] = 78,
+ [0][0][1][0][RTW89_FCC][46] = 80,
+ [0][0][1][0][RTW89_ETSI][46] = 30,
+ [0][0][1][0][RTW89_MKK][46] = 127,
+ [0][0][1][0][RTW89_IC][46] = 80,
+ [0][0][1][0][RTW89_ACMA][46] = 78,
+ [0][0][1][0][RTW89_FCC][48] = 80,
+ [0][0][1][0][RTW89_ETSI][48] = 127,
+ [0][0][1][0][RTW89_MKK][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_ACMA][48] = 127,
+ [0][0][1][0][RTW89_FCC][50] = 80,
+ [0][0][1][0][RTW89_ETSI][50] = 127,
+ [0][0][1][0][RTW89_MKK][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_ACMA][50] = 127,
+ [0][0][1][0][RTW89_FCC][52] = 80,
+ [0][0][1][0][RTW89_ETSI][52] = 127,
+ [0][0][1][0][RTW89_MKK][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_ACMA][52] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 70,
+ [0][1][1][0][RTW89_ETSI][0] = 48,
+ [0][1][1][0][RTW89_MKK][0] = 50,
+ [0][1][1][0][RTW89_IC][0] = 42,
+ [0][1][1][0][RTW89_ACMA][0] = 48,
+ [0][1][1][0][RTW89_FCC][2] = 70,
+ [0][1][1][0][RTW89_ETSI][2] = 48,
+ [0][1][1][0][RTW89_MKK][2] = 50,
+ [0][1][1][0][RTW89_IC][2] = 42,
+ [0][1][1][0][RTW89_ACMA][2] = 48,
+ [0][1][1][0][RTW89_FCC][4] = 70,
+ [0][1][1][0][RTW89_ETSI][4] = 48,
+ [0][1][1][0][RTW89_MKK][4] = 50,
+ [0][1][1][0][RTW89_IC][4] = 42,
+ [0][1][1][0][RTW89_ACMA][4] = 48,
+ [0][1][1][0][RTW89_FCC][6] = 70,
+ [0][1][1][0][RTW89_ETSI][6] = 48,
+ [0][1][1][0][RTW89_MKK][6] = 50,
+ [0][1][1][0][RTW89_IC][6] = 42,
+ [0][1][1][0][RTW89_ACMA][6] = 48,
+ [0][1][1][0][RTW89_FCC][8] = 70,
+ [0][1][1][0][RTW89_ETSI][8] = 48,
+ [0][1][1][0][RTW89_MKK][8] = 50,
+ [0][1][1][0][RTW89_IC][8] = 54,
+ [0][1][1][0][RTW89_ACMA][8] = 48,
+ [0][1][1][0][RTW89_FCC][10] = 70,
+ [0][1][1][0][RTW89_ETSI][10] = 48,
+ [0][1][1][0][RTW89_MKK][10] = 50,
+ [0][1][1][0][RTW89_IC][10] = 54,
+ [0][1][1][0][RTW89_ACMA][10] = 48,
+ [0][1][1][0][RTW89_FCC][12] = 70,
+ [0][1][1][0][RTW89_ETSI][12] = 48,
+ [0][1][1][0][RTW89_MKK][12] = 50,
+ [0][1][1][0][RTW89_IC][12] = 54,
+ [0][1][1][0][RTW89_ACMA][12] = 48,
+ [0][1][1][0][RTW89_FCC][14] = 70,
+ [0][1][1][0][RTW89_ETSI][14] = 48,
+ [0][1][1][0][RTW89_MKK][14] = 50,
+ [0][1][1][0][RTW89_IC][14] = 54,
+ [0][1][1][0][RTW89_ACMA][14] = 48,
+ [0][1][1][0][RTW89_FCC][15] = 68,
+ [0][1][1][0][RTW89_ETSI][15] = 48,
+ [0][1][1][0][RTW89_MKK][15] = 70,
+ [0][1][1][0][RTW89_IC][15] = 68,
+ [0][1][1][0][RTW89_ACMA][15] = 48,
+ [0][1][1][0][RTW89_FCC][17] = 70,
+ [0][1][1][0][RTW89_ETSI][17] = 48,
+ [0][1][1][0][RTW89_MKK][17] = 72,
+ [0][1][1][0][RTW89_IC][17] = 70,
+ [0][1][1][0][RTW89_ACMA][17] = 48,
+ [0][1][1][0][RTW89_FCC][19] = 70,
+ [0][1][1][0][RTW89_ETSI][19] = 48,
+ [0][1][1][0][RTW89_MKK][19] = 72,
+ [0][1][1][0][RTW89_IC][19] = 70,
+ [0][1][1][0][RTW89_ACMA][19] = 48,
+ [0][1][1][0][RTW89_FCC][21] = 70,
+ [0][1][1][0][RTW89_ETSI][21] = 48,
+ [0][1][1][0][RTW89_MKK][21] = 72,
+ [0][1][1][0][RTW89_IC][21] = 70,
+ [0][1][1][0][RTW89_ACMA][21] = 48,
+ [0][1][1][0][RTW89_FCC][23] = 70,
+ [0][1][1][0][RTW89_ETSI][23] = 48,
+ [0][1][1][0][RTW89_MKK][23] = 72,
+ [0][1][1][0][RTW89_IC][23] = 70,
+ [0][1][1][0][RTW89_ACMA][23] = 48,
+ [0][1][1][0][RTW89_FCC][25] = 70,
+ [0][1][1][0][RTW89_ETSI][25] = 48,
+ [0][1][1][0][RTW89_MKK][25] = 70,
+ [0][1][1][0][RTW89_IC][25] = 127,
+ [0][1][1][0][RTW89_ACMA][25] = 127,
+ [0][1][1][0][RTW89_FCC][27] = 70,
+ [0][1][1][0][RTW89_ETSI][27] = 48,
+ [0][1][1][0][RTW89_MKK][27] = 72,
+ [0][1][1][0][RTW89_IC][27] = 127,
+ [0][1][1][0][RTW89_ACMA][27] = 127,
+ [0][1][1][0][RTW89_FCC][29] = 70,
+ [0][1][1][0][RTW89_ETSI][29] = 48,
+ [0][1][1][0][RTW89_MKK][29] = 72,
+ [0][1][1][0][RTW89_IC][29] = 127,
+ [0][1][1][0][RTW89_ACMA][29] = 127,
+ [0][1][1][0][RTW89_FCC][31] = 70,
+ [0][1][1][0][RTW89_ETSI][31] = 48,
+ [0][1][1][0][RTW89_MKK][31] = 72,
+ [0][1][1][0][RTW89_IC][31] = 70,
+ [0][1][1][0][RTW89_ACMA][31] = 48,
+ [0][1][1][0][RTW89_FCC][33] = 70,
+ [0][1][1][0][RTW89_ETSI][33] = 48,
+ [0][1][1][0][RTW89_MKK][33] = 72,
+ [0][1][1][0][RTW89_IC][33] = 70,
+ [0][1][1][0][RTW89_ACMA][33] = 48,
+ [0][1][1][0][RTW89_FCC][35] = 68,
+ [0][1][1][0][RTW89_ETSI][35] = 48,
+ [0][1][1][0][RTW89_MKK][35] = 72,
+ [0][1][1][0][RTW89_IC][35] = 68,
+ [0][1][1][0][RTW89_ACMA][35] = 48,
+ [0][1][1][0][RTW89_FCC][37] = 70,
+ [0][1][1][0][RTW89_ETSI][37] = 127,
+ [0][1][1][0][RTW89_MKK][37] = 72,
+ [0][1][1][0][RTW89_IC][37] = 70,
+ [0][1][1][0][RTW89_ACMA][37] = 72,
+ [0][1][1][0][RTW89_FCC][38] = 80,
+ [0][1][1][0][RTW89_ETSI][38] = 18,
+ [0][1][1][0][RTW89_MKK][38] = 127,
+ [0][1][1][0][RTW89_IC][38] = 80,
+ [0][1][1][0][RTW89_ACMA][38] = 74,
+ [0][1][1][0][RTW89_FCC][40] = 80,
+ [0][1][1][0][RTW89_ETSI][40] = 18,
+ [0][1][1][0][RTW89_MKK][40] = 127,
+ [0][1][1][0][RTW89_IC][40] = 80,
+ [0][1][1][0][RTW89_ACMA][40] = 16,
+ [0][1][1][0][RTW89_FCC][42] = 80,
+ [0][1][1][0][RTW89_ETSI][42] = 18,
+ [0][1][1][0][RTW89_MKK][42] = 127,
+ [0][1][1][0][RTW89_IC][42] = 80,
+ [0][1][1][0][RTW89_ACMA][42] = 78,
+ [0][1][1][0][RTW89_FCC][44] = 80,
+ [0][1][1][0][RTW89_ETSI][44] = 18,
+ [0][1][1][0][RTW89_MKK][44] = 127,
+ [0][1][1][0][RTW89_IC][44] = 80,
+ [0][1][1][0][RTW89_ACMA][44] = 16,
+ [0][1][1][0][RTW89_FCC][46] = 80,
+ [0][1][1][0][RTW89_ETSI][46] = 18,
+ [0][1][1][0][RTW89_MKK][46] = 127,
+ [0][1][1][0][RTW89_IC][46] = 80,
+ [0][1][1][0][RTW89_ACMA][46] = 78,
+ [0][1][1][0][RTW89_FCC][48] = 58,
+ [0][1][1][0][RTW89_ETSI][48] = 127,
+ [0][1][1][0][RTW89_MKK][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_ACMA][48] = 127,
+ [0][1][1][0][RTW89_FCC][50] = 58,
+ [0][1][1][0][RTW89_ETSI][50] = 127,
+ [0][1][1][0][RTW89_MKK][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_ACMA][50] = 127,
+ [0][1][1][0][RTW89_FCC][52] = 58,
+ [0][1][1][0][RTW89_ETSI][52] = 127,
+ [0][1][1][0][RTW89_MKK][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_ACMA][52] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 80,
+ [0][0][2][0][RTW89_ETSI][0] = 62,
+ [0][0][2][0][RTW89_MKK][0] = 64,
+ [0][0][2][0][RTW89_IC][0] = 66,
+ [0][0][2][0][RTW89_ACMA][0] = 62,
+ [0][0][2][0][RTW89_FCC][2] = 80,
+ [0][0][2][0][RTW89_ETSI][2] = 62,
+ [0][0][2][0][RTW89_MKK][2] = 64,
+ [0][0][2][0][RTW89_IC][2] = 66,
+ [0][0][2][0][RTW89_ACMA][2] = 62,
+ [0][0][2][0][RTW89_FCC][4] = 80,
+ [0][0][2][0][RTW89_ETSI][4] = 62,
+ [0][0][2][0][RTW89_MKK][4] = 64,
+ [0][0][2][0][RTW89_IC][4] = 66,
+ [0][0][2][0][RTW89_ACMA][4] = 62,
+ [0][0][2][0][RTW89_FCC][6] = 80,
+ [0][0][2][0][RTW89_ETSI][6] = 62,
+ [0][0][2][0][RTW89_MKK][6] = 64,
+ [0][0][2][0][RTW89_IC][6] = 66,
+ [0][0][2][0][RTW89_ACMA][6] = 62,
+ [0][0][2][0][RTW89_FCC][8] = 80,
+ [0][0][2][0][RTW89_ETSI][8] = 62,
+ [0][0][2][0][RTW89_MKK][8] = 64,
+ [0][0][2][0][RTW89_IC][8] = 66,
+ [0][0][2][0][RTW89_ACMA][8] = 62,
+ [0][0][2][0][RTW89_FCC][10] = 80,
+ [0][0][2][0][RTW89_ETSI][10] = 62,
+ [0][0][2][0][RTW89_MKK][10] = 64,
+ [0][0][2][0][RTW89_IC][10] = 66,
+ [0][0][2][0][RTW89_ACMA][10] = 62,
+ [0][0][2][0][RTW89_FCC][12] = 80,
+ [0][0][2][0][RTW89_ETSI][12] = 62,
+ [0][0][2][0][RTW89_MKK][12] = 64,
+ [0][0][2][0][RTW89_IC][12] = 66,
+ [0][0][2][0][RTW89_ACMA][12] = 62,
+ [0][0][2][0][RTW89_FCC][14] = 80,
+ [0][0][2][0][RTW89_ETSI][14] = 62,
+ [0][0][2][0][RTW89_MKK][14] = 64,
+ [0][0][2][0][RTW89_IC][14] = 66,
+ [0][0][2][0][RTW89_ACMA][14] = 62,
+ [0][0][2][0][RTW89_FCC][15] = 76,
+ [0][0][2][0][RTW89_ETSI][15] = 62,
+ [0][0][2][0][RTW89_MKK][15] = 78,
+ [0][0][2][0][RTW89_IC][15] = 76,
+ [0][0][2][0][RTW89_ACMA][15] = 62,
+ [0][0][2][0][RTW89_FCC][17] = 80,
+ [0][0][2][0][RTW89_ETSI][17] = 62,
+ [0][0][2][0][RTW89_MKK][17] = 78,
+ [0][0][2][0][RTW89_IC][17] = 80,
+ [0][0][2][0][RTW89_ACMA][17] = 62,
+ [0][0][2][0][RTW89_FCC][19] = 80,
+ [0][0][2][0][RTW89_ETSI][19] = 62,
+ [0][0][2][0][RTW89_MKK][19] = 78,
+ [0][0][2][0][RTW89_IC][19] = 80,
+ [0][0][2][0][RTW89_ACMA][19] = 62,
+ [0][0][2][0][RTW89_FCC][21] = 80,
+ [0][0][2][0][RTW89_ETSI][21] = 62,
+ [0][0][2][0][RTW89_MKK][21] = 78,
+ [0][0][2][0][RTW89_IC][21] = 80,
+ [0][0][2][0][RTW89_ACMA][21] = 62,
+ [0][0][2][0][RTW89_FCC][23] = 80,
+ [0][0][2][0][RTW89_ETSI][23] = 62,
+ [0][0][2][0][RTW89_MKK][23] = 78,
+ [0][0][2][0][RTW89_IC][23] = 80,
+ [0][0][2][0][RTW89_ACMA][23] = 62,
+ [0][0][2][0][RTW89_FCC][25] = 80,
+ [0][0][2][0][RTW89_ETSI][25] = 62,
+ [0][0][2][0][RTW89_MKK][25] = 78,
+ [0][0][2][0][RTW89_IC][25] = 127,
+ [0][0][2][0][RTW89_ACMA][25] = 127,
+ [0][0][2][0][RTW89_FCC][27] = 80,
+ [0][0][2][0][RTW89_ETSI][27] = 62,
+ [0][0][2][0][RTW89_MKK][27] = 78,
+ [0][0][2][0][RTW89_IC][27] = 127,
+ [0][0][2][0][RTW89_ACMA][27] = 127,
+ [0][0][2][0][RTW89_FCC][29] = 80,
+ [0][0][2][0][RTW89_ETSI][29] = 62,
+ [0][0][2][0][RTW89_MKK][29] = 78,
+ [0][0][2][0][RTW89_IC][29] = 127,
+ [0][0][2][0][RTW89_ACMA][29] = 127,
+ [0][0][2][0][RTW89_FCC][31] = 80,
+ [0][0][2][0][RTW89_ETSI][31] = 62,
+ [0][0][2][0][RTW89_MKK][31] = 78,
+ [0][0][2][0][RTW89_IC][31] = 80,
+ [0][0][2][0][RTW89_ACMA][31] = 62,
+ [0][0][2][0][RTW89_FCC][33] = 80,
+ [0][0][2][0][RTW89_ETSI][33] = 62,
+ [0][0][2][0][RTW89_MKK][33] = 78,
+ [0][0][2][0][RTW89_IC][33] = 80,
+ [0][0][2][0][RTW89_ACMA][33] = 62,
+ [0][0][2][0][RTW89_FCC][35] = 72,
+ [0][0][2][0][RTW89_ETSI][35] = 62,
+ [0][0][2][0][RTW89_MKK][35] = 78,
+ [0][0][2][0][RTW89_IC][35] = 72,
+ [0][0][2][0][RTW89_ACMA][35] = 62,
+ [0][0][2][0][RTW89_FCC][37] = 80,
+ [0][0][2][0][RTW89_ETSI][37] = 127,
+ [0][0][2][0][RTW89_MKK][37] = 78,
+ [0][0][2][0][RTW89_IC][37] = 80,
+ [0][0][2][0][RTW89_ACMA][37] = 78,
+ [0][0][2][0][RTW89_FCC][38] = 80,
+ [0][0][2][0][RTW89_ETSI][38] = 30,
+ [0][0][2][0][RTW89_MKK][38] = 127,
+ [0][0][2][0][RTW89_IC][38] = 80,
+ [0][0][2][0][RTW89_ACMA][38] = 78,
+ [0][0][2][0][RTW89_FCC][40] = 80,
+ [0][0][2][0][RTW89_ETSI][40] = 30,
+ [0][0][2][0][RTW89_MKK][40] = 127,
+ [0][0][2][0][RTW89_IC][40] = 80,
+ [0][0][2][0][RTW89_ACMA][40] = 78,
+ [0][0][2][0][RTW89_FCC][42] = 80,
+ [0][0][2][0][RTW89_ETSI][42] = 30,
+ [0][0][2][0][RTW89_MKK][42] = 127,
+ [0][0][2][0][RTW89_IC][42] = 80,
+ [0][0][2][0][RTW89_ACMA][42] = 78,
+ [0][0][2][0][RTW89_FCC][44] = 80,
+ [0][0][2][0][RTW89_ETSI][44] = 30,
+ [0][0][2][0][RTW89_MKK][44] = 127,
+ [0][0][2][0][RTW89_IC][44] = 80,
+ [0][0][2][0][RTW89_ACMA][44] = 78,
+ [0][0][2][0][RTW89_FCC][46] = 80,
+ [0][0][2][0][RTW89_ETSI][46] = 30,
+ [0][0][2][0][RTW89_MKK][46] = 127,
+ [0][0][2][0][RTW89_IC][46] = 80,
+ [0][0][2][0][RTW89_ACMA][46] = 78,
+ [0][0][2][0][RTW89_FCC][48] = 80,
+ [0][0][2][0][RTW89_ETSI][48] = 127,
+ [0][0][2][0][RTW89_MKK][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_ACMA][48] = 127,
+ [0][0][2][0][RTW89_FCC][50] = 80,
+ [0][0][2][0][RTW89_ETSI][50] = 127,
+ [0][0][2][0][RTW89_MKK][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_ACMA][50] = 127,
+ [0][0][2][0][RTW89_FCC][52] = 80,
+ [0][0][2][0][RTW89_ETSI][52] = 127,
+ [0][0][2][0][RTW89_MKK][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_ACMA][52] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 72,
+ [0][1][2][0][RTW89_ETSI][0] = 50,
+ [0][1][2][0][RTW89_MKK][0] = 52,
+ [0][1][2][0][RTW89_IC][0] = 44,
+ [0][1][2][0][RTW89_ACMA][0] = 50,
+ [0][1][2][0][RTW89_FCC][2] = 72,
+ [0][1][2][0][RTW89_ETSI][2] = 50,
+ [0][1][2][0][RTW89_MKK][2] = 52,
+ [0][1][2][0][RTW89_IC][2] = 44,
+ [0][1][2][0][RTW89_ACMA][2] = 50,
+ [0][1][2][0][RTW89_FCC][4] = 72,
+ [0][1][2][0][RTW89_ETSI][4] = 50,
+ [0][1][2][0][RTW89_MKK][4] = 52,
+ [0][1][2][0][RTW89_IC][4] = 44,
+ [0][1][2][0][RTW89_ACMA][4] = 50,
+ [0][1][2][0][RTW89_FCC][6] = 72,
+ [0][1][2][0][RTW89_ETSI][6] = 50,
+ [0][1][2][0][RTW89_MKK][6] = 52,
+ [0][1][2][0][RTW89_IC][6] = 44,
+ [0][1][2][0][RTW89_ACMA][6] = 50,
+ [0][1][2][0][RTW89_FCC][8] = 72,
+ [0][1][2][0][RTW89_ETSI][8] = 50,
+ [0][1][2][0][RTW89_MKK][8] = 52,
+ [0][1][2][0][RTW89_IC][8] = 54,
+ [0][1][2][0][RTW89_ACMA][8] = 50,
+ [0][1][2][0][RTW89_FCC][10] = 72,
+ [0][1][2][0][RTW89_ETSI][10] = 50,
+ [0][1][2][0][RTW89_MKK][10] = 52,
+ [0][1][2][0][RTW89_IC][10] = 54,
+ [0][1][2][0][RTW89_ACMA][10] = 50,
+ [0][1][2][0][RTW89_FCC][12] = 72,
+ [0][1][2][0][RTW89_ETSI][12] = 50,
+ [0][1][2][0][RTW89_MKK][12] = 52,
+ [0][1][2][0][RTW89_IC][12] = 54,
+ [0][1][2][0][RTW89_ACMA][12] = 50,
+ [0][1][2][0][RTW89_FCC][14] = 72,
+ [0][1][2][0][RTW89_ETSI][14] = 50,
+ [0][1][2][0][RTW89_MKK][14] = 52,
+ [0][1][2][0][RTW89_IC][14] = 54,
+ [0][1][2][0][RTW89_ACMA][14] = 50,
+ [0][1][2][0][RTW89_FCC][15] = 70,
+ [0][1][2][0][RTW89_ETSI][15] = 50,
+ [0][1][2][0][RTW89_MKK][15] = 72,
+ [0][1][2][0][RTW89_IC][15] = 70,
+ [0][1][2][0][RTW89_ACMA][15] = 50,
+ [0][1][2][0][RTW89_FCC][17] = 72,
+ [0][1][2][0][RTW89_ETSI][17] = 50,
+ [0][1][2][0][RTW89_MKK][17] = 72,
+ [0][1][2][0][RTW89_IC][17] = 72,
+ [0][1][2][0][RTW89_ACMA][17] = 50,
+ [0][1][2][0][RTW89_FCC][19] = 72,
+ [0][1][2][0][RTW89_ETSI][19] = 50,
+ [0][1][2][0][RTW89_MKK][19] = 72,
+ [0][1][2][0][RTW89_IC][19] = 72,
+ [0][1][2][0][RTW89_ACMA][19] = 50,
+ [0][1][2][0][RTW89_FCC][21] = 72,
+ [0][1][2][0][RTW89_ETSI][21] = 50,
+ [0][1][2][0][RTW89_MKK][21] = 72,
+ [0][1][2][0][RTW89_IC][21] = 72,
+ [0][1][2][0][RTW89_ACMA][21] = 50,
+ [0][1][2][0][RTW89_FCC][23] = 72,
+ [0][1][2][0][RTW89_ETSI][23] = 50,
+ [0][1][2][0][RTW89_MKK][23] = 72,
+ [0][1][2][0][RTW89_IC][23] = 72,
+ [0][1][2][0][RTW89_ACMA][23] = 50,
+ [0][1][2][0][RTW89_FCC][25] = 72,
+ [0][1][2][0][RTW89_ETSI][25] = 50,
+ [0][1][2][0][RTW89_MKK][25] = 72,
+ [0][1][2][0][RTW89_IC][25] = 127,
+ [0][1][2][0][RTW89_ACMA][25] = 127,
+ [0][1][2][0][RTW89_FCC][27] = 72,
+ [0][1][2][0][RTW89_ETSI][27] = 50,
+ [0][1][2][0][RTW89_MKK][27] = 72,
+ [0][1][2][0][RTW89_IC][27] = 127,
+ [0][1][2][0][RTW89_ACMA][27] = 127,
+ [0][1][2][0][RTW89_FCC][29] = 72,
+ [0][1][2][0][RTW89_ETSI][29] = 50,
+ [0][1][2][0][RTW89_MKK][29] = 72,
+ [0][1][2][0][RTW89_IC][29] = 127,
+ [0][1][2][0][RTW89_ACMA][29] = 127,
+ [0][1][2][0][RTW89_FCC][31] = 72,
+ [0][1][2][0][RTW89_ETSI][31] = 50,
+ [0][1][2][0][RTW89_MKK][31] = 72,
+ [0][1][2][0][RTW89_IC][31] = 72,
+ [0][1][2][0][RTW89_ACMA][31] = 50,
+ [0][1][2][0][RTW89_FCC][33] = 72,
+ [0][1][2][0][RTW89_ETSI][33] = 50,
+ [0][1][2][0][RTW89_MKK][33] = 72,
+ [0][1][2][0][RTW89_IC][33] = 72,
+ [0][1][2][0][RTW89_ACMA][33] = 50,
+ [0][1][2][0][RTW89_FCC][35] = 68,
+ [0][1][2][0][RTW89_ETSI][35] = 50,
+ [0][1][2][0][RTW89_MKK][35] = 72,
+ [0][1][2][0][RTW89_IC][35] = 68,
+ [0][1][2][0][RTW89_ACMA][35] = 50,
+ [0][1][2][0][RTW89_FCC][37] = 72,
+ [0][1][2][0][RTW89_ETSI][37] = 127,
+ [0][1][2][0][RTW89_MKK][37] = 72,
+ [0][1][2][0][RTW89_IC][37] = 72,
+ [0][1][2][0][RTW89_ACMA][37] = 72,
+ [0][1][2][0][RTW89_FCC][38] = 80,
+ [0][1][2][0][RTW89_ETSI][38] = 18,
+ [0][1][2][0][RTW89_MKK][38] = 127,
+ [0][1][2][0][RTW89_IC][38] = 80,
+ [0][1][2][0][RTW89_ACMA][38] = 76,
+ [0][1][2][0][RTW89_FCC][40] = 80,
+ [0][1][2][0][RTW89_ETSI][40] = 18,
+ [0][1][2][0][RTW89_MKK][40] = 127,
+ [0][1][2][0][RTW89_IC][40] = 80,
+ [0][1][2][0][RTW89_ACMA][40] = 76,
+ [0][1][2][0][RTW89_FCC][42] = 80,
+ [0][1][2][0][RTW89_ETSI][42] = 18,
+ [0][1][2][0][RTW89_MKK][42] = 127,
+ [0][1][2][0][RTW89_IC][42] = 80,
+ [0][1][2][0][RTW89_ACMA][42] = 78,
+ [0][1][2][0][RTW89_FCC][44] = 80,
+ [0][1][2][0][RTW89_ETSI][44] = 18,
+ [0][1][2][0][RTW89_MKK][44] = 127,
+ [0][1][2][0][RTW89_IC][44] = 80,
+ [0][1][2][0][RTW89_ACMA][44] = 78,
+ [0][1][2][0][RTW89_FCC][46] = 80,
+ [0][1][2][0][RTW89_ETSI][46] = 18,
+ [0][1][2][0][RTW89_MKK][46] = 127,
+ [0][1][2][0][RTW89_IC][46] = 80,
+ [0][1][2][0][RTW89_ACMA][46] = 78,
+ [0][1][2][0][RTW89_FCC][48] = 60,
+ [0][1][2][0][RTW89_ETSI][48] = 127,
+ [0][1][2][0][RTW89_MKK][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_ACMA][48] = 127,
+ [0][1][2][0][RTW89_FCC][50] = 60,
+ [0][1][2][0][RTW89_ETSI][50] = 127,
+ [0][1][2][0][RTW89_MKK][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_ACMA][50] = 127,
+ [0][1][2][0][RTW89_FCC][52] = 60,
+ [0][1][2][0][RTW89_ETSI][52] = 127,
+ [0][1][2][0][RTW89_MKK][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_ACMA][52] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 70,
+ [0][1][2][1][RTW89_ETSI][0] = 42,
+ [0][1][2][1][RTW89_MKK][0] = 52,
+ [0][1][2][1][RTW89_IC][0] = 42,
+ [0][1][2][1][RTW89_ACMA][0] = 38,
+ [0][1][2][1][RTW89_FCC][2] = 70,
+ [0][1][2][1][RTW89_ETSI][2] = 42,
+ [0][1][2][1][RTW89_MKK][2] = 52,
+ [0][1][2][1][RTW89_IC][2] = 42,
+ [0][1][2][1][RTW89_ACMA][2] = 38,
+ [0][1][2][1][RTW89_FCC][4] = 70,
+ [0][1][2][1][RTW89_ETSI][4] = 42,
+ [0][1][2][1][RTW89_MKK][4] = 52,
+ [0][1][2][1][RTW89_IC][4] = 42,
+ [0][1][2][1][RTW89_ACMA][4] = 38,
+ [0][1][2][1][RTW89_FCC][6] = 70,
+ [0][1][2][1][RTW89_ETSI][6] = 42,
+ [0][1][2][1][RTW89_MKK][6] = 52,
+ [0][1][2][1][RTW89_IC][6] = 42,
+ [0][1][2][1][RTW89_ACMA][6] = 38,
+ [0][1][2][1][RTW89_FCC][8] = 70,
+ [0][1][2][1][RTW89_ETSI][8] = 42,
+ [0][1][2][1][RTW89_MKK][8] = 52,
+ [0][1][2][1][RTW89_IC][8] = 42,
+ [0][1][2][1][RTW89_ACMA][8] = 38,
+ [0][1][2][1][RTW89_FCC][10] = 70,
+ [0][1][2][1][RTW89_ETSI][10] = 42,
+ [0][1][2][1][RTW89_MKK][10] = 52,
+ [0][1][2][1][RTW89_IC][10] = 42,
+ [0][1][2][1][RTW89_ACMA][10] = 38,
+ [0][1][2][1][RTW89_FCC][12] = 70,
+ [0][1][2][1][RTW89_ETSI][12] = 42,
+ [0][1][2][1][RTW89_MKK][12] = 52,
+ [0][1][2][1][RTW89_IC][12] = 42,
+ [0][1][2][1][RTW89_ACMA][12] = 38,
+ [0][1][2][1][RTW89_FCC][14] = 70,
+ [0][1][2][1][RTW89_ETSI][14] = 42,
+ [0][1][2][1][RTW89_MKK][14] = 52,
+ [0][1][2][1][RTW89_IC][14] = 42,
+ [0][1][2][1][RTW89_ACMA][14] = 38,
+ [0][1][2][1][RTW89_FCC][15] = 70,
+ [0][1][2][1][RTW89_ETSI][15] = 42,
+ [0][1][2][1][RTW89_MKK][15] = 72,
+ [0][1][2][1][RTW89_IC][15] = 70,
+ [0][1][2][1][RTW89_ACMA][15] = 38,
+ [0][1][2][1][RTW89_FCC][17] = 70,
+ [0][1][2][1][RTW89_ETSI][17] = 42,
+ [0][1][2][1][RTW89_MKK][17] = 72,
+ [0][1][2][1][RTW89_IC][17] = 70,
+ [0][1][2][1][RTW89_ACMA][17] = 38,
+ [0][1][2][1][RTW89_FCC][19] = 70,
+ [0][1][2][1][RTW89_ETSI][19] = 42,
+ [0][1][2][1][RTW89_MKK][19] = 72,
+ [0][1][2][1][RTW89_IC][19] = 70,
+ [0][1][2][1][RTW89_ACMA][19] = 38,
+ [0][1][2][1][RTW89_FCC][21] = 70,
+ [0][1][2][1][RTW89_ETSI][21] = 42,
+ [0][1][2][1][RTW89_MKK][21] = 72,
+ [0][1][2][1][RTW89_IC][21] = 70,
+ [0][1][2][1][RTW89_ACMA][21] = 38,
+ [0][1][2][1][RTW89_FCC][23] = 70,
+ [0][1][2][1][RTW89_ETSI][23] = 42,
+ [0][1][2][1][RTW89_MKK][23] = 72,
+ [0][1][2][1][RTW89_IC][23] = 70,
+ [0][1][2][1][RTW89_ACMA][23] = 38,
+ [0][1][2][1][RTW89_FCC][25] = 68,
+ [0][1][2][1][RTW89_ETSI][25] = 42,
+ [0][1][2][1][RTW89_MKK][25] = 72,
+ [0][1][2][1][RTW89_IC][25] = 127,
+ [0][1][2][1][RTW89_ACMA][25] = 127,
+ [0][1][2][1][RTW89_FCC][27] = 68,
+ [0][1][2][1][RTW89_ETSI][27] = 42,
+ [0][1][2][1][RTW89_MKK][27] = 72,
+ [0][1][2][1][RTW89_IC][27] = 127,
+ [0][1][2][1][RTW89_ACMA][27] = 127,
+ [0][1][2][1][RTW89_FCC][29] = 68,
+ [0][1][2][1][RTW89_ETSI][29] = 42,
+ [0][1][2][1][RTW89_MKK][29] = 72,
+ [0][1][2][1][RTW89_IC][29] = 127,
+ [0][1][2][1][RTW89_ACMA][29] = 127,
+ [0][1][2][1][RTW89_FCC][31] = 68,
+ [0][1][2][1][RTW89_ETSI][31] = 42,
+ [0][1][2][1][RTW89_MKK][31] = 72,
+ [0][1][2][1][RTW89_IC][31] = 68,
+ [0][1][2][1][RTW89_ACMA][31] = 38,
+ [0][1][2][1][RTW89_FCC][33] = 68,
+ [0][1][2][1][RTW89_ETSI][33] = 42,
+ [0][1][2][1][RTW89_MKK][33] = 72,
+ [0][1][2][1][RTW89_IC][33] = 68,
+ [0][1][2][1][RTW89_ACMA][33] = 38,
+ [0][1][2][1][RTW89_FCC][35] = 68,
+ [0][1][2][1][RTW89_ETSI][35] = 42,
+ [0][1][2][1][RTW89_MKK][35] = 72,
+ [0][1][2][1][RTW89_IC][35] = 68,
+ [0][1][2][1][RTW89_ACMA][35] = 38,
+ [0][1][2][1][RTW89_FCC][37] = 70,
+ [0][1][2][1][RTW89_ETSI][37] = 127,
+ [0][1][2][1][RTW89_MKK][37] = 72,
+ [0][1][2][1][RTW89_IC][37] = 70,
+ [0][1][2][1][RTW89_ACMA][37] = 72,
+ [0][1][2][1][RTW89_FCC][38] = 80,
+ [0][1][2][1][RTW89_ETSI][38] = 8,
+ [0][1][2][1][RTW89_MKK][38] = 127,
+ [0][1][2][1][RTW89_IC][38] = 80,
+ [0][1][2][1][RTW89_ACMA][38] = 76,
+ [0][1][2][1][RTW89_FCC][40] = 80,
+ [0][1][2][1][RTW89_ETSI][40] = 8,
+ [0][1][2][1][RTW89_MKK][40] = 127,
+ [0][1][2][1][RTW89_IC][40] = 80,
+ [0][1][2][1][RTW89_ACMA][40] = 76,
+ [0][1][2][1][RTW89_FCC][42] = 80,
+ [0][1][2][1][RTW89_ETSI][42] = 8,
+ [0][1][2][1][RTW89_MKK][42] = 127,
+ [0][1][2][1][RTW89_IC][42] = 80,
+ [0][1][2][1][RTW89_ACMA][42] = 78,
+ [0][1][2][1][RTW89_FCC][44] = 80,
+ [0][1][2][1][RTW89_ETSI][44] = 8,
+ [0][1][2][1][RTW89_MKK][44] = 127,
+ [0][1][2][1][RTW89_IC][44] = 80,
+ [0][1][2][1][RTW89_ACMA][44] = 78,
+ [0][1][2][1][RTW89_FCC][46] = 80,
+ [0][1][2][1][RTW89_ETSI][46] = 8,
+ [0][1][2][1][RTW89_MKK][46] = 127,
+ [0][1][2][1][RTW89_IC][46] = 80,
+ [0][1][2][1][RTW89_ACMA][46] = 78,
+ [0][1][2][1][RTW89_FCC][48] = 60,
+ [0][1][2][1][RTW89_ETSI][48] = 127,
+ [0][1][2][1][RTW89_MKK][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_ACMA][48] = 127,
+ [0][1][2][1][RTW89_FCC][50] = 60,
+ [0][1][2][1][RTW89_ETSI][50] = 127,
+ [0][1][2][1][RTW89_MKK][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_ACMA][50] = 127,
+ [0][1][2][1][RTW89_FCC][52] = 60,
+ [0][1][2][1][RTW89_ETSI][52] = 127,
+ [0][1][2][1][RTW89_MKK][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_ACMA][52] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 68,
+ [1][0][2][0][RTW89_ETSI][1] = 66,
+ [1][0][2][0][RTW89_MKK][1] = 72,
+ [1][0][2][0][RTW89_IC][1] = 72,
+ [1][0][2][0][RTW89_ACMA][1] = 72,
+ [1][0][2][0][RTW89_FCC][5] = 80,
+ [1][0][2][0][RTW89_ETSI][5] = 66,
+ [1][0][2][0][RTW89_MKK][5] = 72,
+ [1][0][2][0][RTW89_IC][5] = 72,
+ [1][0][2][0][RTW89_ACMA][5] = 72,
+ [1][0][2][0][RTW89_FCC][9] = 80,
+ [1][0][2][0][RTW89_ETSI][9] = 66,
+ [1][0][2][0][RTW89_MKK][9] = 72,
+ [1][0][2][0][RTW89_IC][9] = 72,
+ [1][0][2][0][RTW89_ACMA][9] = 72,
+ [1][0][2][0][RTW89_FCC][13] = 68,
+ [1][0][2][0][RTW89_ETSI][13] = 66,
+ [1][0][2][0][RTW89_MKK][13] = 72,
+ [1][0][2][0][RTW89_IC][13] = 72,
+ [1][0][2][0][RTW89_ACMA][13] = 72,
+ [1][0][2][0][RTW89_FCC][16] = 66,
+ [1][0][2][0][RTW89_ETSI][16] = 66,
+ [1][0][2][0][RTW89_MKK][16] = 76,
+ [1][0][2][0][RTW89_IC][16] = 72,
+ [1][0][2][0][RTW89_ACMA][16] = 72,
+ [1][0][2][0][RTW89_FCC][20] = 80,
+ [1][0][2][0][RTW89_ETSI][20] = 66,
+ [1][0][2][0][RTW89_MKK][20] = 76,
+ [1][0][2][0][RTW89_IC][20] = 80,
+ [1][0][2][0][RTW89_ACMA][20] = 72,
+ [1][0][2][0][RTW89_FCC][24] = 80,
+ [1][0][2][0][RTW89_ETSI][24] = 66,
+ [1][0][2][0][RTW89_MKK][24] = 76,
+ [1][0][2][0][RTW89_IC][24] = 127,
+ [1][0][2][0][RTW89_ACMA][24] = 127,
+ [1][0][2][0][RTW89_FCC][28] = 80,
+ [1][0][2][0][RTW89_ETSI][28] = 66,
+ [1][0][2][0][RTW89_MKK][28] = 76,
+ [1][0][2][0][RTW89_IC][28] = 127,
+ [1][0][2][0][RTW89_ACMA][28] = 127,
+ [1][0][2][0][RTW89_FCC][32] = 78,
+ [1][0][2][0][RTW89_ETSI][32] = 66,
+ [1][0][2][0][RTW89_MKK][32] = 76,
+ [1][0][2][0][RTW89_IC][32] = 78,
+ [1][0][2][0][RTW89_ACMA][32] = 66,
+ [1][0][2][0][RTW89_FCC][36] = 80,
+ [1][0][2][0][RTW89_ETSI][36] = 127,
+ [1][0][2][0][RTW89_MKK][36] = 76,
+ [1][0][2][0][RTW89_IC][36] = 80,
+ [1][0][2][0][RTW89_ACMA][36] = 76,
+ [1][0][2][0][RTW89_FCC][39] = 80,
+ [1][0][2][0][RTW89_ETSI][39] = 30,
+ [1][0][2][0][RTW89_MKK][39] = 127,
+ [1][0][2][0][RTW89_IC][39] = 80,
+ [1][0][2][0][RTW89_ACMA][39] = 76,
+ [1][0][2][0][RTW89_FCC][43] = 80,
+ [1][0][2][0][RTW89_ETSI][43] = 30,
+ [1][0][2][0][RTW89_MKK][43] = 127,
+ [1][0][2][0][RTW89_IC][43] = 80,
+ [1][0][2][0][RTW89_ACMA][43] = 76,
+ [1][0][2][0][RTW89_FCC][47] = 80,
+ [1][0][2][0][RTW89_ETSI][47] = 127,
+ [1][0][2][0][RTW89_MKK][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_ACMA][47] = 127,
+ [1][0][2][0][RTW89_FCC][51] = 72,
+ [1][0][2][0][RTW89_ETSI][51] = 127,
+ [1][0][2][0][RTW89_MKK][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_ACMA][51] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 64,
+ [1][1][2][0][RTW89_ETSI][1] = 54,
+ [1][1][2][0][RTW89_MKK][1] = 60,
+ [1][1][2][0][RTW89_IC][1] = 60,
+ [1][1][2][0][RTW89_ACMA][1] = 60,
+ [1][1][2][0][RTW89_FCC][5] = 78,
+ [1][1][2][0][RTW89_ETSI][5] = 54,
+ [1][1][2][0][RTW89_MKK][5] = 60,
+ [1][1][2][0][RTW89_IC][5] = 60,
+ [1][1][2][0][RTW89_ACMA][5] = 60,
+ [1][1][2][0][RTW89_FCC][9] = 78,
+ [1][1][2][0][RTW89_ETSI][9] = 54,
+ [1][1][2][0][RTW89_MKK][9] = 60,
+ [1][1][2][0][RTW89_IC][9] = 60,
+ [1][1][2][0][RTW89_ACMA][9] = 60,
+ [1][1][2][0][RTW89_FCC][13] = 64,
+ [1][1][2][0][RTW89_ETSI][13] = 54,
+ [1][1][2][0][RTW89_MKK][13] = 60,
+ [1][1][2][0][RTW89_IC][13] = 60,
+ [1][1][2][0][RTW89_ACMA][13] = 60,
+ [1][1][2][0][RTW89_FCC][16] = 58,
+ [1][1][2][0][RTW89_ETSI][16] = 54,
+ [1][1][2][0][RTW89_MKK][16] = 72,
+ [1][1][2][0][RTW89_IC][16] = 58,
+ [1][1][2][0][RTW89_ACMA][16] = 60,
+ [1][1][2][0][RTW89_FCC][20] = 78,
+ [1][1][2][0][RTW89_ETSI][20] = 54,
+ [1][1][2][0][RTW89_MKK][20] = 72,
+ [1][1][2][0][RTW89_IC][20] = 78,
+ [1][1][2][0][RTW89_ACMA][20] = 60,
+ [1][1][2][0][RTW89_FCC][24] = 78,
+ [1][1][2][0][RTW89_ETSI][24] = 54,
+ [1][1][2][0][RTW89_MKK][24] = 72,
+ [1][1][2][0][RTW89_IC][24] = 127,
+ [1][1][2][0][RTW89_ACMA][24] = 127,
+ [1][1][2][0][RTW89_FCC][28] = 78,
+ [1][1][2][0][RTW89_ETSI][28] = 54,
+ [1][1][2][0][RTW89_MKK][28] = 72,
+ [1][1][2][0][RTW89_IC][28] = 127,
+ [1][1][2][0][RTW89_ACMA][28] = 127,
+ [1][1][2][0][RTW89_FCC][32] = 70,
+ [1][1][2][0][RTW89_ETSI][32] = 54,
+ [1][1][2][0][RTW89_MKK][32] = 72,
+ [1][1][2][0][RTW89_IC][32] = 70,
+ [1][1][2][0][RTW89_ACMA][32] = 54,
+ [1][1][2][0][RTW89_FCC][36] = 78,
+ [1][1][2][0][RTW89_ETSI][36] = 127,
+ [1][1][2][0][RTW89_MKK][36] = 72,
+ [1][1][2][0][RTW89_IC][36] = 78,
+ [1][1][2][0][RTW89_ACMA][36] = 76,
+ [1][1][2][0][RTW89_FCC][39] = 80,
+ [1][1][2][0][RTW89_ETSI][39] = 18,
+ [1][1][2][0][RTW89_MKK][39] = 127,
+ [1][1][2][0][RTW89_IC][39] = 80,
+ [1][1][2][0][RTW89_ACMA][39] = 74,
+ [1][1][2][0][RTW89_FCC][43] = 80,
+ [1][1][2][0][RTW89_ETSI][43] = 18,
+ [1][1][2][0][RTW89_MKK][43] = 127,
+ [1][1][2][0][RTW89_IC][43] = 80,
+ [1][1][2][0][RTW89_ACMA][43] = 76,
+ [1][1][2][0][RTW89_FCC][47] = 70,
+ [1][1][2][0][RTW89_ETSI][47] = 127,
+ [1][1][2][0][RTW89_MKK][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_ACMA][47] = 127,
+ [1][1][2][0][RTW89_FCC][51] = 68,
+ [1][1][2][0][RTW89_ETSI][51] = 127,
+ [1][1][2][0][RTW89_MKK][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_ACMA][51] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 64,
+ [1][1][2][1][RTW89_ETSI][1] = 42,
+ [1][1][2][1][RTW89_MKK][1] = 60,
+ [1][1][2][1][RTW89_IC][1] = 48,
+ [1][1][2][1][RTW89_ACMA][1] = 48,
+ [1][1][2][1][RTW89_FCC][5] = 70,
+ [1][1][2][1][RTW89_ETSI][5] = 42,
+ [1][1][2][1][RTW89_MKK][5] = 60,
+ [1][1][2][1][RTW89_IC][5] = 48,
+ [1][1][2][1][RTW89_ACMA][5] = 48,
+ [1][1][2][1][RTW89_FCC][9] = 70,
+ [1][1][2][1][RTW89_ETSI][9] = 42,
+ [1][1][2][1][RTW89_MKK][9] = 60,
+ [1][1][2][1][RTW89_IC][9] = 48,
+ [1][1][2][1][RTW89_ACMA][9] = 48,
+ [1][1][2][1][RTW89_FCC][13] = 64,
+ [1][1][2][1][RTW89_ETSI][13] = 42,
+ [1][1][2][1][RTW89_MKK][13] = 60,
+ [1][1][2][1][RTW89_IC][13] = 48,
+ [1][1][2][1][RTW89_ACMA][13] = 48,
+ [1][1][2][1][RTW89_FCC][16] = 58,
+ [1][1][2][1][RTW89_ETSI][16] = 42,
+ [1][1][2][1][RTW89_MKK][16] = 72,
+ [1][1][2][1][RTW89_IC][16] = 58,
+ [1][1][2][1][RTW89_ACMA][16] = 48,
+ [1][1][2][1][RTW89_FCC][20] = 70,
+ [1][1][2][1][RTW89_ETSI][20] = 42,
+ [1][1][2][1][RTW89_MKK][20] = 72,
+ [1][1][2][1][RTW89_IC][20] = 70,
+ [1][1][2][1][RTW89_ACMA][20] = 48,
+ [1][1][2][1][RTW89_FCC][24] = 70,
+ [1][1][2][1][RTW89_ETSI][24] = 42,
+ [1][1][2][1][RTW89_MKK][24] = 72,
+ [1][1][2][1][RTW89_IC][24] = 127,
+ [1][1][2][1][RTW89_ACMA][24] = 127,
+ [1][1][2][1][RTW89_FCC][28] = 70,
+ [1][1][2][1][RTW89_ETSI][28] = 42,
+ [1][1][2][1][RTW89_MKK][28] = 72,
+ [1][1][2][1][RTW89_IC][28] = 127,
+ [1][1][2][1][RTW89_ACMA][28] = 127,
+ [1][1][2][1][RTW89_FCC][32] = 70,
+ [1][1][2][1][RTW89_ETSI][32] = 42,
+ [1][1][2][1][RTW89_MKK][32] = 72,
+ [1][1][2][1][RTW89_IC][32] = 70,
+ [1][1][2][1][RTW89_ACMA][32] = 42,
+ [1][1][2][1][RTW89_FCC][36] = 70,
+ [1][1][2][1][RTW89_ETSI][36] = 127,
+ [1][1][2][1][RTW89_MKK][36] = 72,
+ [1][1][2][1][RTW89_IC][36] = 70,
+ [1][1][2][1][RTW89_ACMA][36] = 72,
+ [1][1][2][1][RTW89_FCC][39] = 80,
+ [1][1][2][1][RTW89_ETSI][39] = 8,
+ [1][1][2][1][RTW89_MKK][39] = 127,
+ [1][1][2][1][RTW89_IC][39] = 80,
+ [1][1][2][1][RTW89_ACMA][39] = 74,
+ [1][1][2][1][RTW89_FCC][43] = 80,
+ [1][1][2][1][RTW89_ETSI][43] = 8,
+ [1][1][2][1][RTW89_MKK][43] = 127,
+ [1][1][2][1][RTW89_IC][43] = 80,
+ [1][1][2][1][RTW89_ACMA][43] = 76,
+ [1][1][2][1][RTW89_FCC][47] = 70,
+ [1][1][2][1][RTW89_ETSI][47] = 127,
+ [1][1][2][1][RTW89_MKK][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_ACMA][47] = 127,
+ [1][1][2][1][RTW89_FCC][51] = 68,
+ [1][1][2][1][RTW89_ETSI][51] = 127,
+ [1][1][2][1][RTW89_MKK][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_ACMA][51] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 66,
+ [2][0][2][0][RTW89_ETSI][3] = 66,
+ [2][0][2][0][RTW89_MKK][3] = 66,
+ [2][0][2][0][RTW89_IC][3] = 64,
+ [2][0][2][0][RTW89_ACMA][3] = 66,
+ [2][0][2][0][RTW89_FCC][11] = 68,
+ [2][0][2][0][RTW89_ETSI][11] = 66,
+ [2][0][2][0][RTW89_MKK][11] = 66,
+ [2][0][2][0][RTW89_IC][11] = 66,
+ [2][0][2][0][RTW89_ACMA][11] = 66,
+ [2][0][2][0][RTW89_FCC][18] = 64,
+ [2][0][2][0][RTW89_ETSI][18] = 66,
+ [2][0][2][0][RTW89_MKK][18] = 72,
+ [2][0][2][0][RTW89_IC][18] = 64,
+ [2][0][2][0][RTW89_ACMA][18] = 66,
+ [2][0][2][0][RTW89_FCC][26] = 76,
+ [2][0][2][0][RTW89_ETSI][26] = 66,
+ [2][0][2][0][RTW89_MKK][26] = 72,
+ [2][0][2][0][RTW89_IC][26] = 127,
+ [2][0][2][0][RTW89_ACMA][26] = 127,
+ [2][0][2][0][RTW89_FCC][34] = 76,
+ [2][0][2][0][RTW89_ETSI][34] = 127,
+ [2][0][2][0][RTW89_MKK][34] = 72,
+ [2][0][2][0][RTW89_IC][34] = 76,
+ [2][0][2][0][RTW89_ACMA][34] = 72,
+ [2][0][2][0][RTW89_FCC][41] = 76,
+ [2][0][2][0][RTW89_ETSI][41] = 30,
+ [2][0][2][0][RTW89_MKK][41] = 127,
+ [2][0][2][0][RTW89_IC][41] = 76,
+ [2][0][2][0][RTW89_ACMA][41] = 72,
+ [2][0][2][0][RTW89_FCC][49] = 66,
+ [2][0][2][0][RTW89_ETSI][49] = 127,
+ [2][0][2][0][RTW89_MKK][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_ACMA][49] = 127,
+ [2][1][2][0][RTW89_FCC][3] = 58,
+ [2][1][2][0][RTW89_ETSI][3] = 54,
+ [2][1][2][0][RTW89_MKK][3] = 54,
+ [2][1][2][0][RTW89_IC][3] = 54,
+ [2][1][2][0][RTW89_ACMA][3] = 54,
+ [2][1][2][0][RTW89_FCC][11] = 64,
+ [2][1][2][0][RTW89_ETSI][11] = 54,
+ [2][1][2][0][RTW89_MKK][11] = 54,
+ [2][1][2][0][RTW89_IC][11] = 54,
+ [2][1][2][0][RTW89_ACMA][11] = 54,
+ [2][1][2][0][RTW89_FCC][18] = 58,
+ [2][1][2][0][RTW89_ETSI][18] = 54,
+ [2][1][2][0][RTW89_MKK][18] = 72,
+ [2][1][2][0][RTW89_IC][18] = 58,
+ [2][1][2][0][RTW89_ACMA][18] = 54,
+ [2][1][2][0][RTW89_FCC][26] = 72,
+ [2][1][2][0][RTW89_ETSI][26] = 54,
+ [2][1][2][0][RTW89_MKK][26] = 72,
+ [2][1][2][0][RTW89_IC][26] = 127,
+ [2][1][2][0][RTW89_ACMA][26] = 127,
+ [2][1][2][0][RTW89_FCC][34] = 76,
+ [2][1][2][0][RTW89_ETSI][34] = 127,
+ [2][1][2][0][RTW89_MKK][34] = 72,
+ [2][1][2][0][RTW89_IC][34] = 76,
+ [2][1][2][0][RTW89_ACMA][34] = 72,
+ [2][1][2][0][RTW89_FCC][41] = 76,
+ [2][1][2][0][RTW89_ETSI][41] = 18,
+ [2][1][2][0][RTW89_MKK][41] = 127,
+ [2][1][2][0][RTW89_IC][41] = 76,
+ [2][1][2][0][RTW89_ACMA][41] = 72,
+ [2][1][2][0][RTW89_FCC][49] = 60,
+ [2][1][2][0][RTW89_ETSI][49] = 127,
+ [2][1][2][0][RTW89_MKK][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_ACMA][49] = 127,
+ [2][1][2][1][RTW89_FCC][3] = 58,
+ [2][1][2][1][RTW89_ETSI][3] = 42,
+ [2][1][2][1][RTW89_MKK][3] = 54,
+ [2][1][2][1][RTW89_IC][3] = 42,
+ [2][1][2][1][RTW89_ACMA][3] = 42,
+ [2][1][2][1][RTW89_FCC][11] = 64,
+ [2][1][2][1][RTW89_ETSI][11] = 42,
+ [2][1][2][1][RTW89_MKK][11] = 54,
+ [2][1][2][1][RTW89_IC][11] = 42,
+ [2][1][2][1][RTW89_ACMA][11] = 42,
+ [2][1][2][1][RTW89_FCC][18] = 58,
+ [2][1][2][1][RTW89_ETSI][18] = 42,
+ [2][1][2][1][RTW89_MKK][18] = 72,
+ [2][1][2][1][RTW89_IC][18] = 58,
+ [2][1][2][1][RTW89_ACMA][18] = 42,
+ [2][1][2][1][RTW89_FCC][26] = 70,
+ [2][1][2][1][RTW89_ETSI][26] = 44,
+ [2][1][2][1][RTW89_MKK][26] = 72,
+ [2][1][2][1][RTW89_IC][26] = 127,
+ [2][1][2][1][RTW89_ACMA][26] = 127,
+ [2][1][2][1][RTW89_FCC][34] = 70,
+ [2][1][2][1][RTW89_ETSI][34] = 127,
+ [2][1][2][1][RTW89_MKK][34] = 72,
+ [2][1][2][1][RTW89_IC][34] = 70,
+ [2][1][2][1][RTW89_ACMA][34] = 72,
+ [2][1][2][1][RTW89_FCC][41] = 76,
+ [2][1][2][1][RTW89_ETSI][41] = 8,
+ [2][1][2][1][RTW89_MKK][41] = 127,
+ [2][1][2][1][RTW89_IC][41] = 76,
+ [2][1][2][1][RTW89_ACMA][41] = 72,
+ [2][1][2][1][RTW89_FCC][49] = 60,
+ [2][1][2][1][RTW89_ETSI][49] = 127,
+ [2][1][2][1][RTW89_MKK][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_ACMA][49] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 56,
+ [3][0][2][0][RTW89_ETSI][7] = 56,
+ [3][0][2][0][RTW89_MKK][7] = 56,
+ [3][0][2][0][RTW89_IC][7] = 56,
+ [3][0][2][0][RTW89_ACMA][7] = 56,
+ [3][0][2][0][RTW89_FCC][22] = 56,
+ [3][0][2][0][RTW89_ETSI][22] = 56,
+ [3][0][2][0][RTW89_MKK][22] = 56,
+ [3][0][2][0][RTW89_IC][22] = 56,
+ [3][0][2][0][RTW89_ACMA][22] = 56,
+ [3][0][2][0][RTW89_FCC][45] = 56,
+ [3][0][2][0][RTW89_ETSI][45] = 127,
+ [3][0][2][0][RTW89_MKK][45] = 127,
+ [3][0][2][0][RTW89_IC][45] = 127,
+ [3][0][2][0][RTW89_ACMA][45] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 44,
+ [3][1][2][0][RTW89_ETSI][7] = 44,
+ [3][1][2][0][RTW89_MKK][7] = 44,
+ [3][1][2][0][RTW89_IC][7] = 44,
+ [3][1][2][0][RTW89_ACMA][7] = 44,
+ [3][1][2][0][RTW89_FCC][22] = 44,
+ [3][1][2][0][RTW89_ETSI][22] = 44,
+ [3][1][2][0][RTW89_MKK][22] = 44,
+ [3][1][2][0][RTW89_IC][22] = 44,
+ [3][1][2][0][RTW89_ACMA][22] = 44,
+ [3][1][2][0][RTW89_FCC][45] = 44,
+ [3][1][2][0][RTW89_ETSI][45] = 127,
+ [3][1][2][0][RTW89_MKK][45] = 127,
+ [3][1][2][0][RTW89_IC][45] = 127,
+ [3][1][2][0][RTW89_ACMA][45] = 127,
+ [3][1][2][1][RTW89_FCC][7] = 32,
+ [3][1][2][1][RTW89_ETSI][7] = 32,
+ [3][1][2][1][RTW89_MKK][7] = 32,
+ [3][1][2][1][RTW89_IC][7] = 32,
+ [3][1][2][1][RTW89_ACMA][7] = 32,
+ [3][1][2][1][RTW89_FCC][22] = 32,
+ [3][1][2][1][RTW89_ETSI][22] = 32,
+ [3][1][2][1][RTW89_MKK][22] = 32,
+ [3][1][2][1][RTW89_IC][22] = 32,
+ [3][1][2][1][RTW89_ACMA][22] = 32,
+ [3][1][2][1][RTW89_FCC][45] = 32,
+ [3][1][2][1][RTW89_ETSI][45] = 127,
+ [3][1][2][1][RTW89_MKK][45] = 127,
+ [3][1][2][1][RTW89_IC][45] = 127,
+ [3][1][2][1][RTW89_ACMA][45] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
+ [0][0][1][0][RTW89_WW][0] = 72,
+ [0][0][1][0][RTW89_WW][2] = 72,
+ [0][0][1][0][RTW89_WW][4] = 72,
+ [0][0][1][0][RTW89_WW][6] = 72,
+ [0][0][1][0][RTW89_WW][8] = 72,
+ [0][0][1][0][RTW89_WW][10] = 72,
+ [0][0][1][0][RTW89_WW][12] = 72,
+ [0][0][1][0][RTW89_WW][14] = 72,
+ [0][0][1][0][RTW89_WW][15] = 72,
+ [0][0][1][0][RTW89_WW][17] = 72,
+ [0][0][1][0][RTW89_WW][19] = 72,
+ [0][0][1][0][RTW89_WW][21] = 72,
+ [0][0][1][0][RTW89_WW][23] = 72,
+ [0][0][1][0][RTW89_WW][25] = 72,
+ [0][0][1][0][RTW89_WW][27] = 72,
+ [0][0][1][0][RTW89_WW][29] = 72,
+ [0][0][1][0][RTW89_WW][30] = 72,
+ [0][0][1][0][RTW89_WW][32] = 72,
+ [0][0][1][0][RTW89_WW][34] = 72,
+ [0][0][1][0][RTW89_WW][36] = 72,
+ [0][0][1][0][RTW89_WW][38] = 72,
+ [0][0][1][0][RTW89_WW][40] = 72,
+ [0][0][1][0][RTW89_WW][42] = 72,
+ [0][0][1][0][RTW89_WW][44] = 72,
+ [0][0][1][0][RTW89_WW][45] = 72,
+ [0][0][1][0][RTW89_WW][47] = 72,
+ [0][0][1][0][RTW89_WW][49] = 72,
+ [0][0][1][0][RTW89_WW][51] = 72,
+ [0][0][1][0][RTW89_WW][53] = 72,
+ [0][0][1][0][RTW89_WW][55] = 72,
+ [0][0][1][0][RTW89_WW][57] = 72,
+ [0][0][1][0][RTW89_WW][59] = 72,
+ [0][0][1][0][RTW89_WW][60] = 72,
+ [0][0][1][0][RTW89_WW][62] = 72,
+ [0][0][1][0][RTW89_WW][64] = 72,
+ [0][0][1][0][RTW89_WW][66] = 72,
+ [0][0][1][0][RTW89_WW][68] = 72,
+ [0][0][1][0][RTW89_WW][70] = 72,
+ [0][0][1][0][RTW89_WW][72] = 72,
+ [0][0][1][0][RTW89_WW][74] = 72,
+ [0][0][1][0][RTW89_WW][75] = 72,
+ [0][0][1][0][RTW89_WW][77] = 72,
+ [0][0][1][0][RTW89_WW][79] = 72,
+ [0][0][1][0][RTW89_WW][81] = 72,
+ [0][0][1][0][RTW89_WW][83] = 72,
+ [0][0][1][0][RTW89_WW][85] = 72,
+ [0][0][1][0][RTW89_WW][87] = 72,
+ [0][0][1][0][RTW89_WW][89] = 72,
+ [0][0][1][0][RTW89_WW][90] = 72,
+ [0][0][1][0][RTW89_WW][92] = 72,
+ [0][0][1][0][RTW89_WW][94] = 72,
+ [0][0][1][0][RTW89_WW][96] = 72,
+ [0][0][1][0][RTW89_WW][98] = 72,
+ [0][0][1][0][RTW89_WW][100] = 72,
+ [0][0][1][0][RTW89_WW][102] = 72,
+ [0][0][1][0][RTW89_WW][104] = 72,
+ [0][0][1][0][RTW89_WW][105] = 72,
+ [0][0][1][0][RTW89_WW][107] = 72,
+ [0][0][1][0][RTW89_WW][109] = 72,
+ [0][0][1][0][RTW89_WW][111] = 0,
+ [0][0][1][0][RTW89_WW][113] = 0,
+ [0][0][1][0][RTW89_WW][115] = 0,
+ [0][0][1][0][RTW89_WW][117] = 0,
+ [0][0][1][0][RTW89_WW][119] = 0,
+ [0][1][1][0][RTW89_WW][0] = 60,
+ [0][1][1][0][RTW89_WW][2] = 60,
+ [0][1][1][0][RTW89_WW][4] = 60,
+ [0][1][1][0][RTW89_WW][6] = 60,
+ [0][1][1][0][RTW89_WW][8] = 60,
+ [0][1][1][0][RTW89_WW][10] = 60,
+ [0][1][1][0][RTW89_WW][12] = 60,
+ [0][1][1][0][RTW89_WW][14] = 60,
+ [0][1][1][0][RTW89_WW][15] = 60,
+ [0][1][1][0][RTW89_WW][17] = 60,
+ [0][1][1][0][RTW89_WW][19] = 60,
+ [0][1][1][0][RTW89_WW][21] = 60,
+ [0][1][1][0][RTW89_WW][23] = 60,
+ [0][1][1][0][RTW89_WW][25] = 60,
+ [0][1][1][0][RTW89_WW][27] = 60,
+ [0][1][1][0][RTW89_WW][29] = 60,
+ [0][1][1][0][RTW89_WW][30] = 60,
+ [0][1][1][0][RTW89_WW][32] = 60,
+ [0][1][1][0][RTW89_WW][34] = 60,
+ [0][1][1][0][RTW89_WW][36] = 60,
+ [0][1][1][0][RTW89_WW][38] = 60,
+ [0][1][1][0][RTW89_WW][40] = 60,
+ [0][1][1][0][RTW89_WW][42] = 60,
+ [0][1][1][0][RTW89_WW][44] = 60,
+ [0][1][1][0][RTW89_WW][45] = 60,
+ [0][1][1][0][RTW89_WW][47] = 60,
+ [0][1][1][0][RTW89_WW][49] = 60,
+ [0][1][1][0][RTW89_WW][51] = 60,
+ [0][1][1][0][RTW89_WW][53] = 60,
+ [0][1][1][0][RTW89_WW][55] = 60,
+ [0][1][1][0][RTW89_WW][57] = 60,
+ [0][1][1][0][RTW89_WW][59] = 60,
+ [0][1][1][0][RTW89_WW][60] = 60,
+ [0][1][1][0][RTW89_WW][62] = 60,
+ [0][1][1][0][RTW89_WW][64] = 60,
+ [0][1][1][0][RTW89_WW][66] = 60,
+ [0][1][1][0][RTW89_WW][68] = 60,
+ [0][1][1][0][RTW89_WW][70] = 60,
+ [0][1][1][0][RTW89_WW][72] = 60,
+ [0][1][1][0][RTW89_WW][74] = 60,
+ [0][1][1][0][RTW89_WW][75] = 60,
+ [0][1][1][0][RTW89_WW][77] = 60,
+ [0][1][1][0][RTW89_WW][79] = 60,
+ [0][1][1][0][RTW89_WW][81] = 60,
+ [0][1][1][0][RTW89_WW][83] = 60,
+ [0][1][1][0][RTW89_WW][85] = 60,
+ [0][1][1][0][RTW89_WW][87] = 60,
+ [0][1][1][0][RTW89_WW][89] = 60,
+ [0][1][1][0][RTW89_WW][90] = 60,
+ [0][1][1][0][RTW89_WW][92] = 60,
+ [0][1][1][0][RTW89_WW][94] = 60,
+ [0][1][1][0][RTW89_WW][96] = 60,
+ [0][1][1][0][RTW89_WW][98] = 60,
+ [0][1][1][0][RTW89_WW][100] = 60,
+ [0][1][1][0][RTW89_WW][102] = 60,
+ [0][1][1][0][RTW89_WW][104] = 60,
+ [0][1][1][0][RTW89_WW][105] = 60,
+ [0][1][1][0][RTW89_WW][107] = 60,
+ [0][1][1][0][RTW89_WW][109] = 60,
+ [0][1][1][0][RTW89_WW][111] = 0,
+ [0][1][1][0][RTW89_WW][113] = 0,
+ [0][1][1][0][RTW89_WW][115] = 0,
+ [0][1][1][0][RTW89_WW][117] = 0,
+ [0][1][1][0][RTW89_WW][119] = 0,
+ [0][0][2][0][RTW89_WW][0] = 72,
+ [0][0][2][0][RTW89_WW][2] = 72,
+ [0][0][2][0][RTW89_WW][4] = 72,
+ [0][0][2][0][RTW89_WW][6] = 72,
+ [0][0][2][0][RTW89_WW][8] = 72,
+ [0][0][2][0][RTW89_WW][10] = 72,
+ [0][0][2][0][RTW89_WW][12] = 72,
+ [0][0][2][0][RTW89_WW][14] = 72,
+ [0][0][2][0][RTW89_WW][15] = 72,
+ [0][0][2][0][RTW89_WW][17] = 72,
+ [0][0][2][0][RTW89_WW][19] = 72,
+ [0][0][2][0][RTW89_WW][21] = 72,
+ [0][0][2][0][RTW89_WW][23] = 72,
+ [0][0][2][0][RTW89_WW][25] = 72,
+ [0][0][2][0][RTW89_WW][27] = 72,
+ [0][0][2][0][RTW89_WW][29] = 72,
+ [0][0][2][0][RTW89_WW][30] = 72,
+ [0][0][2][0][RTW89_WW][32] = 72,
+ [0][0][2][0][RTW89_WW][34] = 72,
+ [0][0][2][0][RTW89_WW][36] = 72,
+ [0][0][2][0][RTW89_WW][38] = 72,
+ [0][0][2][0][RTW89_WW][40] = 72,
+ [0][0][2][0][RTW89_WW][42] = 72,
+ [0][0][2][0][RTW89_WW][44] = 72,
+ [0][0][2][0][RTW89_WW][45] = 72,
+ [0][0][2][0][RTW89_WW][47] = 72,
+ [0][0][2][0][RTW89_WW][49] = 72,
+ [0][0][2][0][RTW89_WW][51] = 72,
+ [0][0][2][0][RTW89_WW][53] = 72,
+ [0][0][2][0][RTW89_WW][55] = 72,
+ [0][0][2][0][RTW89_WW][57] = 72,
+ [0][0][2][0][RTW89_WW][59] = 72,
+ [0][0][2][0][RTW89_WW][60] = 72,
+ [0][0][2][0][RTW89_WW][62] = 72,
+ [0][0][2][0][RTW89_WW][64] = 72,
+ [0][0][2][0][RTW89_WW][66] = 72,
+ [0][0][2][0][RTW89_WW][68] = 72,
+ [0][0][2][0][RTW89_WW][70] = 72,
+ [0][0][2][0][RTW89_WW][72] = 72,
+ [0][0][2][0][RTW89_WW][74] = 72,
+ [0][0][2][0][RTW89_WW][75] = 72,
+ [0][0][2][0][RTW89_WW][77] = 72,
+ [0][0][2][0][RTW89_WW][79] = 72,
+ [0][0][2][0][RTW89_WW][81] = 72,
+ [0][0][2][0][RTW89_WW][83] = 72,
+ [0][0][2][0][RTW89_WW][85] = 72,
+ [0][0][2][0][RTW89_WW][87] = 72,
+ [0][0][2][0][RTW89_WW][89] = 72,
+ [0][0][2][0][RTW89_WW][90] = 72,
+ [0][0][2][0][RTW89_WW][92] = 72,
+ [0][0][2][0][RTW89_WW][94] = 72,
+ [0][0][2][0][RTW89_WW][96] = 72,
+ [0][0][2][0][RTW89_WW][98] = 72,
+ [0][0][2][0][RTW89_WW][100] = 72,
+ [0][0][2][0][RTW89_WW][102] = 72,
+ [0][0][2][0][RTW89_WW][104] = 72,
+ [0][0][2][0][RTW89_WW][105] = 72,
+ [0][0][2][0][RTW89_WW][107] = 72,
+ [0][0][2][0][RTW89_WW][109] = 72,
+ [0][0][2][0][RTW89_WW][111] = 0,
+ [0][0][2][0][RTW89_WW][113] = 0,
+ [0][0][2][0][RTW89_WW][115] = 0,
+ [0][0][2][0][RTW89_WW][117] = 0,
+ [0][0][2][0][RTW89_WW][119] = 0,
+ [0][1][2][0][RTW89_WW][0] = 60,
+ [0][1][2][0][RTW89_WW][2] = 60,
+ [0][1][2][0][RTW89_WW][4] = 60,
+ [0][1][2][0][RTW89_WW][6] = 60,
+ [0][1][2][0][RTW89_WW][8] = 60,
+ [0][1][2][0][RTW89_WW][10] = 60,
+ [0][1][2][0][RTW89_WW][12] = 60,
+ [0][1][2][0][RTW89_WW][14] = 60,
+ [0][1][2][0][RTW89_WW][15] = 60,
+ [0][1][2][0][RTW89_WW][17] = 60,
+ [0][1][2][0][RTW89_WW][19] = 60,
+ [0][1][2][0][RTW89_WW][21] = 60,
+ [0][1][2][0][RTW89_WW][23] = 60,
+ [0][1][2][0][RTW89_WW][25] = 60,
+ [0][1][2][0][RTW89_WW][27] = 60,
+ [0][1][2][0][RTW89_WW][29] = 60,
+ [0][1][2][0][RTW89_WW][30] = 60,
+ [0][1][2][0][RTW89_WW][32] = 60,
+ [0][1][2][0][RTW89_WW][34] = 60,
+ [0][1][2][0][RTW89_WW][36] = 60,
+ [0][1][2][0][RTW89_WW][38] = 60,
+ [0][1][2][0][RTW89_WW][40] = 60,
+ [0][1][2][0][RTW89_WW][42] = 60,
+ [0][1][2][0][RTW89_WW][44] = 60,
+ [0][1][2][0][RTW89_WW][45] = 60,
+ [0][1][2][0][RTW89_WW][47] = 60,
+ [0][1][2][0][RTW89_WW][49] = 60,
+ [0][1][2][0][RTW89_WW][51] = 60,
+ [0][1][2][0][RTW89_WW][53] = 60,
+ [0][1][2][0][RTW89_WW][55] = 60,
+ [0][1][2][0][RTW89_WW][57] = 60,
+ [0][1][2][0][RTW89_WW][59] = 60,
+ [0][1][2][0][RTW89_WW][60] = 60,
+ [0][1][2][0][RTW89_WW][62] = 60,
+ [0][1][2][0][RTW89_WW][64] = 60,
+ [0][1][2][0][RTW89_WW][66] = 60,
+ [0][1][2][0][RTW89_WW][68] = 60,
+ [0][1][2][0][RTW89_WW][70] = 60,
+ [0][1][2][0][RTW89_WW][72] = 60,
+ [0][1][2][0][RTW89_WW][74] = 60,
+ [0][1][2][0][RTW89_WW][75] = 60,
+ [0][1][2][0][RTW89_WW][77] = 60,
+ [0][1][2][0][RTW89_WW][79] = 60,
+ [0][1][2][0][RTW89_WW][81] = 60,
+ [0][1][2][0][RTW89_WW][83] = 60,
+ [0][1][2][0][RTW89_WW][85] = 60,
+ [0][1][2][0][RTW89_WW][87] = 60,
+ [0][1][2][0][RTW89_WW][89] = 60,
+ [0][1][2][0][RTW89_WW][90] = 60,
+ [0][1][2][0][RTW89_WW][92] = 60,
+ [0][1][2][0][RTW89_WW][94] = 60,
+ [0][1][2][0][RTW89_WW][96] = 60,
+ [0][1][2][0][RTW89_WW][98] = 60,
+ [0][1][2][0][RTW89_WW][100] = 60,
+ [0][1][2][0][RTW89_WW][102] = 60,
+ [0][1][2][0][RTW89_WW][104] = 60,
+ [0][1][2][0][RTW89_WW][105] = 60,
+ [0][1][2][0][RTW89_WW][107] = 60,
+ [0][1][2][0][RTW89_WW][109] = 60,
+ [0][1][2][0][RTW89_WW][111] = 0,
+ [0][1][2][0][RTW89_WW][113] = 0,
+ [0][1][2][0][RTW89_WW][115] = 0,
+ [0][1][2][0][RTW89_WW][117] = 0,
+ [0][1][2][0][RTW89_WW][119] = 0,
+ [0][1][2][1][RTW89_WW][0] = 48,
+ [0][1][2][1][RTW89_WW][2] = 48,
+ [0][1][2][1][RTW89_WW][4] = 48,
+ [0][1][2][1][RTW89_WW][6] = 48,
+ [0][1][2][1][RTW89_WW][8] = 48,
+ [0][1][2][1][RTW89_WW][10] = 48,
+ [0][1][2][1][RTW89_WW][12] = 48,
+ [0][1][2][1][RTW89_WW][14] = 48,
+ [0][1][2][1][RTW89_WW][15] = 48,
+ [0][1][2][1][RTW89_WW][17] = 48,
+ [0][1][2][1][RTW89_WW][19] = 48,
+ [0][1][2][1][RTW89_WW][21] = 48,
+ [0][1][2][1][RTW89_WW][23] = 48,
+ [0][1][2][1][RTW89_WW][25] = 48,
+ [0][1][2][1][RTW89_WW][27] = 48,
+ [0][1][2][1][RTW89_WW][29] = 48,
+ [0][1][2][1][RTW89_WW][30] = 48,
+ [0][1][2][1][RTW89_WW][32] = 48,
+ [0][1][2][1][RTW89_WW][34] = 48,
+ [0][1][2][1][RTW89_WW][36] = 48,
+ [0][1][2][1][RTW89_WW][38] = 48,
+ [0][1][2][1][RTW89_WW][40] = 48,
+ [0][1][2][1][RTW89_WW][42] = 48,
+ [0][1][2][1][RTW89_WW][44] = 48,
+ [0][1][2][1][RTW89_WW][45] = 48,
+ [0][1][2][1][RTW89_WW][47] = 48,
+ [0][1][2][1][RTW89_WW][49] = 48,
+ [0][1][2][1][RTW89_WW][51] = 48,
+ [0][1][2][1][RTW89_WW][53] = 48,
+ [0][1][2][1][RTW89_WW][55] = 48,
+ [0][1][2][1][RTW89_WW][57] = 48,
+ [0][1][2][1][RTW89_WW][59] = 48,
+ [0][1][2][1][RTW89_WW][60] = 48,
+ [0][1][2][1][RTW89_WW][62] = 48,
+ [0][1][2][1][RTW89_WW][64] = 48,
+ [0][1][2][1][RTW89_WW][66] = 48,
+ [0][1][2][1][RTW89_WW][68] = 48,
+ [0][1][2][1][RTW89_WW][70] = 48,
+ [0][1][2][1][RTW89_WW][72] = 48,
+ [0][1][2][1][RTW89_WW][74] = 48,
+ [0][1][2][1][RTW89_WW][75] = 48,
+ [0][1][2][1][RTW89_WW][77] = 48,
+ [0][1][2][1][RTW89_WW][79] = 48,
+ [0][1][2][1][RTW89_WW][81] = 48,
+ [0][1][2][1][RTW89_WW][83] = 48,
+ [0][1][2][1][RTW89_WW][85] = 48,
+ [0][1][2][1][RTW89_WW][87] = 48,
+ [0][1][2][1][RTW89_WW][89] = 48,
+ [0][1][2][1][RTW89_WW][90] = 48,
+ [0][1][2][1][RTW89_WW][92] = 48,
+ [0][1][2][1][RTW89_WW][94] = 48,
+ [0][1][2][1][RTW89_WW][96] = 48,
+ [0][1][2][1][RTW89_WW][98] = 48,
+ [0][1][2][1][RTW89_WW][100] = 48,
+ [0][1][2][1][RTW89_WW][102] = 48,
+ [0][1][2][1][RTW89_WW][104] = 48,
+ [0][1][2][1][RTW89_WW][105] = 48,
+ [0][1][2][1][RTW89_WW][107] = 48,
+ [0][1][2][1][RTW89_WW][109] = 48,
+ [0][1][2][1][RTW89_WW][111] = 0,
+ [0][1][2][1][RTW89_WW][113] = 0,
+ [0][1][2][1][RTW89_WW][115] = 0,
+ [0][1][2][1][RTW89_WW][117] = 0,
+ [0][1][2][1][RTW89_WW][119] = 0,
+ [1][0][2][0][RTW89_WW][1] = 72,
+ [1][0][2][0][RTW89_WW][5] = 72,
+ [1][0][2][0][RTW89_WW][9] = 72,
+ [1][0][2][0][RTW89_WW][13] = 72,
+ [1][0][2][0][RTW89_WW][16] = 72,
+ [1][0][2][0][RTW89_WW][20] = 72,
+ [1][0][2][0][RTW89_WW][24] = 72,
+ [1][0][2][0][RTW89_WW][28] = 72,
+ [1][0][2][0][RTW89_WW][31] = 72,
+ [1][0][2][0][RTW89_WW][35] = 72,
+ [1][0][2][0][RTW89_WW][39] = 72,
+ [1][0][2][0][RTW89_WW][43] = 72,
+ [1][0][2][0][RTW89_WW][46] = 72,
+ [1][0][2][0][RTW89_WW][50] = 72,
+ [1][0][2][0][RTW89_WW][54] = 72,
+ [1][0][2][0][RTW89_WW][58] = 72,
+ [1][0][2][0][RTW89_WW][61] = 72,
+ [1][0][2][0][RTW89_WW][65] = 72,
+ [1][0][2][0][RTW89_WW][69] = 72,
+ [1][0][2][0][RTW89_WW][73] = 72,
+ [1][0][2][0][RTW89_WW][76] = 72,
+ [1][0][2][0][RTW89_WW][80] = 72,
+ [1][0][2][0][RTW89_WW][84] = 72,
+ [1][0][2][0][RTW89_WW][88] = 72,
+ [1][0][2][0][RTW89_WW][91] = 72,
+ [1][0][2][0][RTW89_WW][95] = 72,
+ [1][0][2][0][RTW89_WW][99] = 72,
+ [1][0][2][0][RTW89_WW][103] = 72,
+ [1][0][2][0][RTW89_WW][106] = 72,
+ [1][0][2][0][RTW89_WW][110] = 0,
+ [1][0][2][0][RTW89_WW][114] = 0,
+ [1][0][2][0][RTW89_WW][118] = 0,
+ [1][1][2][0][RTW89_WW][1] = 60,
+ [1][1][2][0][RTW89_WW][5] = 60,
+ [1][1][2][0][RTW89_WW][9] = 60,
+ [1][1][2][0][RTW89_WW][13] = 60,
+ [1][1][2][0][RTW89_WW][16] = 60,
+ [1][1][2][0][RTW89_WW][20] = 60,
+ [1][1][2][0][RTW89_WW][24] = 60,
+ [1][1][2][0][RTW89_WW][28] = 60,
+ [1][1][2][0][RTW89_WW][31] = 60,
+ [1][1][2][0][RTW89_WW][35] = 60,
+ [1][1][2][0][RTW89_WW][39] = 60,
+ [1][1][2][0][RTW89_WW][43] = 60,
+ [1][1][2][0][RTW89_WW][46] = 60,
+ [1][1][2][0][RTW89_WW][50] = 60,
+ [1][1][2][0][RTW89_WW][54] = 60,
+ [1][1][2][0][RTW89_WW][58] = 60,
+ [1][1][2][0][RTW89_WW][61] = 60,
+ [1][1][2][0][RTW89_WW][65] = 60,
+ [1][1][2][0][RTW89_WW][69] = 60,
+ [1][1][2][0][RTW89_WW][73] = 60,
+ [1][1][2][0][RTW89_WW][76] = 60,
+ [1][1][2][0][RTW89_WW][80] = 60,
+ [1][1][2][0][RTW89_WW][84] = 60,
+ [1][1][2][0][RTW89_WW][88] = 60,
+ [1][1][2][0][RTW89_WW][91] = 60,
+ [1][1][2][0][RTW89_WW][95] = 60,
+ [1][1][2][0][RTW89_WW][99] = 60,
+ [1][1][2][0][RTW89_WW][103] = 60,
+ [1][1][2][0][RTW89_WW][106] = 60,
+ [1][1][2][0][RTW89_WW][110] = 0,
+ [1][1][2][0][RTW89_WW][114] = 0,
+ [1][1][2][0][RTW89_WW][118] = 0,
+ [1][1][2][1][RTW89_WW][1] = 48,
+ [1][1][2][1][RTW89_WW][5] = 48,
+ [1][1][2][1][RTW89_WW][9] = 48,
+ [1][1][2][1][RTW89_WW][13] = 48,
+ [1][1][2][1][RTW89_WW][16] = 48,
+ [1][1][2][1][RTW89_WW][20] = 48,
+ [1][1][2][1][RTW89_WW][24] = 48,
+ [1][1][2][1][RTW89_WW][28] = 48,
+ [1][1][2][1][RTW89_WW][31] = 48,
+ [1][1][2][1][RTW89_WW][35] = 48,
+ [1][1][2][1][RTW89_WW][39] = 48,
+ [1][1][2][1][RTW89_WW][43] = 48,
+ [1][1][2][1][RTW89_WW][46] = 48,
+ [1][1][2][1][RTW89_WW][50] = 48,
+ [1][1][2][1][RTW89_WW][54] = 48,
+ [1][1][2][1][RTW89_WW][58] = 48,
+ [1][1][2][1][RTW89_WW][61] = 48,
+ [1][1][2][1][RTW89_WW][65] = 48,
+ [1][1][2][1][RTW89_WW][69] = 48,
+ [1][1][2][1][RTW89_WW][73] = 48,
+ [1][1][2][1][RTW89_WW][76] = 48,
+ [1][1][2][1][RTW89_WW][80] = 48,
+ [1][1][2][1][RTW89_WW][84] = 48,
+ [1][1][2][1][RTW89_WW][88] = 48,
+ [1][1][2][1][RTW89_WW][91] = 48,
+ [1][1][2][1][RTW89_WW][95] = 48,
+ [1][1][2][1][RTW89_WW][99] = 48,
+ [1][1][2][1][RTW89_WW][103] = 48,
+ [1][1][2][1][RTW89_WW][106] = 48,
+ [1][1][2][1][RTW89_WW][110] = 0,
+ [1][1][2][1][RTW89_WW][114] = 0,
+ [1][1][2][1][RTW89_WW][118] = 0,
+ [2][0][2][0][RTW89_WW][3] = 64,
+ [2][0][2][0][RTW89_WW][11] = 64,
+ [2][0][2][0][RTW89_WW][18] = 64,
+ [2][0][2][0][RTW89_WW][26] = 64,
+ [2][0][2][0][RTW89_WW][33] = 64,
+ [2][0][2][0][RTW89_WW][41] = 64,
+ [2][0][2][0][RTW89_WW][48] = 64,
+ [2][0][2][0][RTW89_WW][56] = 64,
+ [2][0][2][0][RTW89_WW][63] = 64,
+ [2][0][2][0][RTW89_WW][71] = 64,
+ [2][0][2][0][RTW89_WW][78] = 64,
+ [2][0][2][0][RTW89_WW][86] = 64,
+ [2][0][2][0][RTW89_WW][93] = 64,
+ [2][0][2][0][RTW89_WW][101] = 64,
+ [2][0][2][0][RTW89_WW][108] = 0,
+ [2][0][2][0][RTW89_WW][116] = 0,
+ [2][1][2][0][RTW89_WW][3] = 52,
+ [2][1][2][0][RTW89_WW][11] = 52,
+ [2][1][2][0][RTW89_WW][18] = 52,
+ [2][1][2][0][RTW89_WW][26] = 52,
+ [2][1][2][0][RTW89_WW][33] = 52,
+ [2][1][2][0][RTW89_WW][41] = 52,
+ [2][1][2][0][RTW89_WW][48] = 52,
+ [2][1][2][0][RTW89_WW][56] = 52,
+ [2][1][2][0][RTW89_WW][63] = 52,
+ [2][1][2][0][RTW89_WW][71] = 52,
+ [2][1][2][0][RTW89_WW][78] = 52,
+ [2][1][2][0][RTW89_WW][86] = 52,
+ [2][1][2][0][RTW89_WW][93] = 52,
+ [2][1][2][0][RTW89_WW][101] = 52,
+ [2][1][2][0][RTW89_WW][108] = 0,
+ [2][1][2][0][RTW89_WW][116] = 0,
+ [2][1][2][1][RTW89_WW][3] = 40,
+ [2][1][2][1][RTW89_WW][11] = 40,
+ [2][1][2][1][RTW89_WW][18] = 40,
+ [2][1][2][1][RTW89_WW][26] = 40,
+ [2][1][2][1][RTW89_WW][33] = 40,
+ [2][1][2][1][RTW89_WW][41] = 40,
+ [2][1][2][1][RTW89_WW][48] = 40,
+ [2][1][2][1][RTW89_WW][56] = 40,
+ [2][1][2][1][RTW89_WW][63] = 40,
+ [2][1][2][1][RTW89_WW][71] = 40,
+ [2][1][2][1][RTW89_WW][78] = 40,
+ [2][1][2][1][RTW89_WW][86] = 40,
+ [2][1][2][1][RTW89_WW][93] = 40,
+ [2][1][2][1][RTW89_WW][101] = 40,
+ [2][1][2][1][RTW89_WW][108] = 0,
+ [2][1][2][1][RTW89_WW][116] = 0,
+ [3][0][2][0][RTW89_WW][7] = 56,
+ [3][0][2][0][RTW89_WW][22] = 56,
+ [3][0][2][0][RTW89_WW][37] = 56,
+ [3][0][2][0][RTW89_WW][52] = 56,
+ [3][0][2][0][RTW89_WW][67] = 56,
+ [3][0][2][0][RTW89_WW][82] = 56,
+ [3][0][2][0][RTW89_WW][97] = 56,
+ [3][0][2][0][RTW89_WW][112] = 0,
+ [3][1][2][0][RTW89_WW][7] = 44,
+ [3][1][2][0][RTW89_WW][22] = 44,
+ [3][1][2][0][RTW89_WW][37] = 44,
+ [3][1][2][0][RTW89_WW][52] = 44,
+ [3][1][2][0][RTW89_WW][67] = 44,
+ [3][1][2][0][RTW89_WW][82] = 44,
+ [3][1][2][0][RTW89_WW][97] = 44,
+ [3][1][2][0][RTW89_WW][112] = 0,
+ [3][1][2][1][RTW89_WW][7] = 32,
+ [3][1][2][1][RTW89_WW][22] = 32,
+ [3][1][2][1][RTW89_WW][37] = 32,
+ [3][1][2][1][RTW89_WW][52] = 32,
+ [3][1][2][1][RTW89_WW][67] = 32,
+ [3][1][2][1][RTW89_WW][82] = 32,
+ [3][1][2][1][RTW89_WW][97] = 32,
+ [3][1][2][1][RTW89_WW][112] = 0,
+ [0][0][1][0][RTW89_FCC][0] = 72,
+ [0][0][1][0][RTW89_FCC][2] = 72,
+ [0][0][1][0][RTW89_FCC][4] = 72,
+ [0][0][1][0][RTW89_FCC][6] = 72,
+ [0][0][1][0][RTW89_FCC][8] = 72,
+ [0][0][1][0][RTW89_FCC][10] = 72,
+ [0][0][1][0][RTW89_FCC][12] = 72,
+ [0][0][1][0][RTW89_FCC][14] = 72,
+ [0][0][1][0][RTW89_FCC][15] = 72,
+ [0][0][1][0][RTW89_FCC][17] = 72,
+ [0][0][1][0][RTW89_FCC][19] = 72,
+ [0][0][1][0][RTW89_FCC][21] = 72,
+ [0][0][1][0][RTW89_FCC][23] = 72,
+ [0][0][1][0][RTW89_FCC][25] = 72,
+ [0][0][1][0][RTW89_FCC][27] = 72,
+ [0][0][1][0][RTW89_FCC][29] = 72,
+ [0][0][1][0][RTW89_FCC][30] = 72,
+ [0][0][1][0][RTW89_FCC][32] = 72,
+ [0][0][1][0][RTW89_FCC][34] = 72,
+ [0][0][1][0][RTW89_FCC][36] = 72,
+ [0][0][1][0][RTW89_FCC][38] = 72,
+ [0][0][1][0][RTW89_FCC][40] = 72,
+ [0][0][1][0][RTW89_FCC][42] = 72,
+ [0][0][1][0][RTW89_FCC][44] = 72,
+ [0][0][1][0][RTW89_FCC][45] = 72,
+ [0][0][1][0][RTW89_FCC][47] = 72,
+ [0][0][1][0][RTW89_FCC][49] = 72,
+ [0][0][1][0][RTW89_FCC][51] = 72,
+ [0][0][1][0][RTW89_FCC][53] = 72,
+ [0][0][1][0][RTW89_FCC][55] = 72,
+ [0][0][1][0][RTW89_FCC][57] = 72,
+ [0][0][1][0][RTW89_FCC][59] = 72,
+ [0][0][1][0][RTW89_FCC][60] = 72,
+ [0][0][1][0][RTW89_FCC][62] = 72,
+ [0][0][1][0][RTW89_FCC][64] = 72,
+ [0][0][1][0][RTW89_FCC][66] = 72,
+ [0][0][1][0][RTW89_FCC][68] = 72,
+ [0][0][1][0][RTW89_FCC][70] = 72,
+ [0][0][1][0][RTW89_FCC][72] = 72,
+ [0][0][1][0][RTW89_FCC][74] = 72,
+ [0][0][1][0][RTW89_FCC][75] = 72,
+ [0][0][1][0][RTW89_FCC][77] = 72,
+ [0][0][1][0][RTW89_FCC][79] = 72,
+ [0][0][1][0][RTW89_FCC][81] = 72,
+ [0][0][1][0][RTW89_FCC][83] = 72,
+ [0][0][1][0][RTW89_FCC][85] = 72,
+ [0][0][1][0][RTW89_FCC][87] = 72,
+ [0][0][1][0][RTW89_FCC][89] = 72,
+ [0][0][1][0][RTW89_FCC][90] = 72,
+ [0][0][1][0][RTW89_FCC][92] = 72,
+ [0][0][1][0][RTW89_FCC][94] = 72,
+ [0][0][1][0][RTW89_FCC][96] = 72,
+ [0][0][1][0][RTW89_FCC][98] = 72,
+ [0][0][1][0][RTW89_FCC][100] = 72,
+ [0][0][1][0][RTW89_FCC][102] = 72,
+ [0][0][1][0][RTW89_FCC][104] = 72,
+ [0][0][1][0][RTW89_FCC][105] = 72,
+ [0][0][1][0][RTW89_FCC][107] = 72,
+ [0][0][1][0][RTW89_FCC][109] = 72,
+ [0][0][1][0][RTW89_FCC][111] = 127,
+ [0][0][1][0][RTW89_FCC][113] = 127,
+ [0][0][1][0][RTW89_FCC][115] = 127,
+ [0][0][1][0][RTW89_FCC][117] = 127,
+ [0][0][1][0][RTW89_FCC][119] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 60,
+ [0][1][1][0][RTW89_FCC][2] = 60,
+ [0][1][1][0][RTW89_FCC][4] = 60,
+ [0][1][1][0][RTW89_FCC][6] = 60,
+ [0][1][1][0][RTW89_FCC][8] = 60,
+ [0][1][1][0][RTW89_FCC][10] = 60,
+ [0][1][1][0][RTW89_FCC][12] = 60,
+ [0][1][1][0][RTW89_FCC][14] = 60,
+ [0][1][1][0][RTW89_FCC][15] = 60,
+ [0][1][1][0][RTW89_FCC][17] = 60,
+ [0][1][1][0][RTW89_FCC][19] = 60,
+ [0][1][1][0][RTW89_FCC][21] = 60,
+ [0][1][1][0][RTW89_FCC][23] = 60,
+ [0][1][1][0][RTW89_FCC][25] = 60,
+ [0][1][1][0][RTW89_FCC][27] = 60,
+ [0][1][1][0][RTW89_FCC][29] = 60,
+ [0][1][1][0][RTW89_FCC][30] = 60,
+ [0][1][1][0][RTW89_FCC][32] = 60,
+ [0][1][1][0][RTW89_FCC][34] = 60,
+ [0][1][1][0][RTW89_FCC][36] = 60,
+ [0][1][1][0][RTW89_FCC][38] = 60,
+ [0][1][1][0][RTW89_FCC][40] = 60,
+ [0][1][1][0][RTW89_FCC][42] = 60,
+ [0][1][1][0][RTW89_FCC][44] = 60,
+ [0][1][1][0][RTW89_FCC][45] = 60,
+ [0][1][1][0][RTW89_FCC][47] = 60,
+ [0][1][1][0][RTW89_FCC][49] = 60,
+ [0][1][1][0][RTW89_FCC][51] = 60,
+ [0][1][1][0][RTW89_FCC][53] = 60,
+ [0][1][1][0][RTW89_FCC][55] = 60,
+ [0][1][1][0][RTW89_FCC][57] = 60,
+ [0][1][1][0][RTW89_FCC][59] = 60,
+ [0][1][1][0][RTW89_FCC][60] = 60,
+ [0][1][1][0][RTW89_FCC][62] = 60,
+ [0][1][1][0][RTW89_FCC][64] = 60,
+ [0][1][1][0][RTW89_FCC][66] = 60,
+ [0][1][1][0][RTW89_FCC][68] = 60,
+ [0][1][1][0][RTW89_FCC][70] = 60,
+ [0][1][1][0][RTW89_FCC][72] = 60,
+ [0][1][1][0][RTW89_FCC][74] = 60,
+ [0][1][1][0][RTW89_FCC][75] = 60,
+ [0][1][1][0][RTW89_FCC][77] = 60,
+ [0][1][1][0][RTW89_FCC][79] = 60,
+ [0][1][1][0][RTW89_FCC][81] = 60,
+ [0][1][1][0][RTW89_FCC][83] = 60,
+ [0][1][1][0][RTW89_FCC][85] = 60,
+ [0][1][1][0][RTW89_FCC][87] = 60,
+ [0][1][1][0][RTW89_FCC][89] = 60,
+ [0][1][1][0][RTW89_FCC][90] = 60,
+ [0][1][1][0][RTW89_FCC][92] = 60,
+ [0][1][1][0][RTW89_FCC][94] = 60,
+ [0][1][1][0][RTW89_FCC][96] = 60,
+ [0][1][1][0][RTW89_FCC][98] = 60,
+ [0][1][1][0][RTW89_FCC][100] = 60,
+ [0][1][1][0][RTW89_FCC][102] = 60,
+ [0][1][1][0][RTW89_FCC][104] = 60,
+ [0][1][1][0][RTW89_FCC][105] = 60,
+ [0][1][1][0][RTW89_FCC][107] = 60,
+ [0][1][1][0][RTW89_FCC][109] = 60,
+ [0][1][1][0][RTW89_FCC][111] = 127,
+ [0][1][1][0][RTW89_FCC][113] = 127,
+ [0][1][1][0][RTW89_FCC][115] = 127,
+ [0][1][1][0][RTW89_FCC][117] = 127,
+ [0][1][1][0][RTW89_FCC][119] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 72,
+ [0][0][2][0][RTW89_FCC][2] = 72,
+ [0][0][2][0][RTW89_FCC][4] = 72,
+ [0][0][2][0][RTW89_FCC][6] = 72,
+ [0][0][2][0][RTW89_FCC][8] = 72,
+ [0][0][2][0][RTW89_FCC][10] = 72,
+ [0][0][2][0][RTW89_FCC][12] = 72,
+ [0][0][2][0][RTW89_FCC][14] = 72,
+ [0][0][2][0][RTW89_FCC][15] = 72,
+ [0][0][2][0][RTW89_FCC][17] = 72,
+ [0][0][2][0][RTW89_FCC][19] = 72,
+ [0][0][2][0][RTW89_FCC][21] = 72,
+ [0][0][2][0][RTW89_FCC][23] = 72,
+ [0][0][2][0][RTW89_FCC][25] = 72,
+ [0][0][2][0][RTW89_FCC][27] = 72,
+ [0][0][2][0][RTW89_FCC][29] = 72,
+ [0][0][2][0][RTW89_FCC][30] = 72,
+ [0][0][2][0][RTW89_FCC][32] = 72,
+ [0][0][2][0][RTW89_FCC][34] = 72,
+ [0][0][2][0][RTW89_FCC][36] = 72,
+ [0][0][2][0][RTW89_FCC][38] = 72,
+ [0][0][2][0][RTW89_FCC][40] = 72,
+ [0][0][2][0][RTW89_FCC][42] = 72,
+ [0][0][2][0][RTW89_FCC][44] = 72,
+ [0][0][2][0][RTW89_FCC][45] = 72,
+ [0][0][2][0][RTW89_FCC][47] = 72,
+ [0][0][2][0][RTW89_FCC][49] = 72,
+ [0][0][2][0][RTW89_FCC][51] = 72,
+ [0][0][2][0][RTW89_FCC][53] = 72,
+ [0][0][2][0][RTW89_FCC][55] = 72,
+ [0][0][2][0][RTW89_FCC][57] = 72,
+ [0][0][2][0][RTW89_FCC][59] = 72,
+ [0][0][2][0][RTW89_FCC][60] = 72,
+ [0][0][2][0][RTW89_FCC][62] = 72,
+ [0][0][2][0][RTW89_FCC][64] = 72,
+ [0][0][2][0][RTW89_FCC][66] = 72,
+ [0][0][2][0][RTW89_FCC][68] = 72,
+ [0][0][2][0][RTW89_FCC][70] = 72,
+ [0][0][2][0][RTW89_FCC][72] = 72,
+ [0][0][2][0][RTW89_FCC][74] = 72,
+ [0][0][2][0][RTW89_FCC][75] = 72,
+ [0][0][2][0][RTW89_FCC][77] = 72,
+ [0][0][2][0][RTW89_FCC][79] = 72,
+ [0][0][2][0][RTW89_FCC][81] = 72,
+ [0][0][2][0][RTW89_FCC][83] = 72,
+ [0][0][2][0][RTW89_FCC][85] = 72,
+ [0][0][2][0][RTW89_FCC][87] = 72,
+ [0][0][2][0][RTW89_FCC][89] = 72,
+ [0][0][2][0][RTW89_FCC][90] = 72,
+ [0][0][2][0][RTW89_FCC][92] = 72,
+ [0][0][2][0][RTW89_FCC][94] = 72,
+ [0][0][2][0][RTW89_FCC][96] = 72,
+ [0][0][2][0][RTW89_FCC][98] = 72,
+ [0][0][2][0][RTW89_FCC][100] = 72,
+ [0][0][2][0][RTW89_FCC][102] = 72,
+ [0][0][2][0][RTW89_FCC][104] = 72,
+ [0][0][2][0][RTW89_FCC][105] = 72,
+ [0][0][2][0][RTW89_FCC][107] = 72,
+ [0][0][2][0][RTW89_FCC][109] = 72,
+ [0][0][2][0][RTW89_FCC][111] = 127,
+ [0][0][2][0][RTW89_FCC][113] = 127,
+ [0][0][2][0][RTW89_FCC][115] = 127,
+ [0][0][2][0][RTW89_FCC][117] = 127,
+ [0][0][2][0][RTW89_FCC][119] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 60,
+ [0][1][2][0][RTW89_FCC][2] = 60,
+ [0][1][2][0][RTW89_FCC][4] = 60,
+ [0][1][2][0][RTW89_FCC][6] = 60,
+ [0][1][2][0][RTW89_FCC][8] = 60,
+ [0][1][2][0][RTW89_FCC][10] = 60,
+ [0][1][2][0][RTW89_FCC][12] = 60,
+ [0][1][2][0][RTW89_FCC][14] = 60,
+ [0][1][2][0][RTW89_FCC][15] = 60,
+ [0][1][2][0][RTW89_FCC][17] = 60,
+ [0][1][2][0][RTW89_FCC][19] = 60,
+ [0][1][2][0][RTW89_FCC][21] = 60,
+ [0][1][2][0][RTW89_FCC][23] = 60,
+ [0][1][2][0][RTW89_FCC][25] = 60,
+ [0][1][2][0][RTW89_FCC][27] = 60,
+ [0][1][2][0][RTW89_FCC][29] = 60,
+ [0][1][2][0][RTW89_FCC][30] = 60,
+ [0][1][2][0][RTW89_FCC][32] = 60,
+ [0][1][2][0][RTW89_FCC][34] = 60,
+ [0][1][2][0][RTW89_FCC][36] = 60,
+ [0][1][2][0][RTW89_FCC][38] = 60,
+ [0][1][2][0][RTW89_FCC][40] = 60,
+ [0][1][2][0][RTW89_FCC][42] = 60,
+ [0][1][2][0][RTW89_FCC][44] = 60,
+ [0][1][2][0][RTW89_FCC][45] = 60,
+ [0][1][2][0][RTW89_FCC][47] = 60,
+ [0][1][2][0][RTW89_FCC][49] = 60,
+ [0][1][2][0][RTW89_FCC][51] = 60,
+ [0][1][2][0][RTW89_FCC][53] = 60,
+ [0][1][2][0][RTW89_FCC][55] = 60,
+ [0][1][2][0][RTW89_FCC][57] = 60,
+ [0][1][2][0][RTW89_FCC][59] = 60,
+ [0][1][2][0][RTW89_FCC][60] = 60,
+ [0][1][2][0][RTW89_FCC][62] = 60,
+ [0][1][2][0][RTW89_FCC][64] = 60,
+ [0][1][2][0][RTW89_FCC][66] = 60,
+ [0][1][2][0][RTW89_FCC][68] = 60,
+ [0][1][2][0][RTW89_FCC][70] = 60,
+ [0][1][2][0][RTW89_FCC][72] = 60,
+ [0][1][2][0][RTW89_FCC][74] = 60,
+ [0][1][2][0][RTW89_FCC][75] = 60,
+ [0][1][2][0][RTW89_FCC][77] = 60,
+ [0][1][2][0][RTW89_FCC][79] = 60,
+ [0][1][2][0][RTW89_FCC][81] = 60,
+ [0][1][2][0][RTW89_FCC][83] = 60,
+ [0][1][2][0][RTW89_FCC][85] = 60,
+ [0][1][2][0][RTW89_FCC][87] = 60,
+ [0][1][2][0][RTW89_FCC][89] = 60,
+ [0][1][2][0][RTW89_FCC][90] = 60,
+ [0][1][2][0][RTW89_FCC][92] = 60,
+ [0][1][2][0][RTW89_FCC][94] = 60,
+ [0][1][2][0][RTW89_FCC][96] = 60,
+ [0][1][2][0][RTW89_FCC][98] = 60,
+ [0][1][2][0][RTW89_FCC][100] = 60,
+ [0][1][2][0][RTW89_FCC][102] = 60,
+ [0][1][2][0][RTW89_FCC][104] = 60,
+ [0][1][2][0][RTW89_FCC][105] = 60,
+ [0][1][2][0][RTW89_FCC][107] = 60,
+ [0][1][2][0][RTW89_FCC][109] = 60,
+ [0][1][2][0][RTW89_FCC][111] = 127,
+ [0][1][2][0][RTW89_FCC][113] = 127,
+ [0][1][2][0][RTW89_FCC][115] = 127,
+ [0][1][2][0][RTW89_FCC][117] = 127,
+ [0][1][2][0][RTW89_FCC][119] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 48,
+ [0][1][2][1][RTW89_FCC][2] = 48,
+ [0][1][2][1][RTW89_FCC][4] = 48,
+ [0][1][2][1][RTW89_FCC][6] = 48,
+ [0][1][2][1][RTW89_FCC][8] = 48,
+ [0][1][2][1][RTW89_FCC][10] = 48,
+ [0][1][2][1][RTW89_FCC][12] = 48,
+ [0][1][2][1][RTW89_FCC][14] = 48,
+ [0][1][2][1][RTW89_FCC][15] = 48,
+ [0][1][2][1][RTW89_FCC][17] = 48,
+ [0][1][2][1][RTW89_FCC][19] = 48,
+ [0][1][2][1][RTW89_FCC][21] = 48,
+ [0][1][2][1][RTW89_FCC][23] = 48,
+ [0][1][2][1][RTW89_FCC][25] = 48,
+ [0][1][2][1][RTW89_FCC][27] = 48,
+ [0][1][2][1][RTW89_FCC][29] = 48,
+ [0][1][2][1][RTW89_FCC][30] = 48,
+ [0][1][2][1][RTW89_FCC][32] = 48,
+ [0][1][2][1][RTW89_FCC][34] = 48,
+ [0][1][2][1][RTW89_FCC][36] = 48,
+ [0][1][2][1][RTW89_FCC][38] = 48,
+ [0][1][2][1][RTW89_FCC][40] = 48,
+ [0][1][2][1][RTW89_FCC][42] = 48,
+ [0][1][2][1][RTW89_FCC][44] = 48,
+ [0][1][2][1][RTW89_FCC][45] = 48,
+ [0][1][2][1][RTW89_FCC][47] = 48,
+ [0][1][2][1][RTW89_FCC][49] = 48,
+ [0][1][2][1][RTW89_FCC][51] = 48,
+ [0][1][2][1][RTW89_FCC][53] = 48,
+ [0][1][2][1][RTW89_FCC][55] = 48,
+ [0][1][2][1][RTW89_FCC][57] = 48,
+ [0][1][2][1][RTW89_FCC][59] = 48,
+ [0][1][2][1][RTW89_FCC][60] = 48,
+ [0][1][2][1][RTW89_FCC][62] = 48,
+ [0][1][2][1][RTW89_FCC][64] = 48,
+ [0][1][2][1][RTW89_FCC][66] = 48,
+ [0][1][2][1][RTW89_FCC][68] = 48,
+ [0][1][2][1][RTW89_FCC][70] = 48,
+ [0][1][2][1][RTW89_FCC][72] = 48,
+ [0][1][2][1][RTW89_FCC][74] = 48,
+ [0][1][2][1][RTW89_FCC][75] = 48,
+ [0][1][2][1][RTW89_FCC][77] = 48,
+ [0][1][2][1][RTW89_FCC][79] = 48,
+ [0][1][2][1][RTW89_FCC][81] = 48,
+ [0][1][2][1][RTW89_FCC][83] = 48,
+ [0][1][2][1][RTW89_FCC][85] = 48,
+ [0][1][2][1][RTW89_FCC][87] = 48,
+ [0][1][2][1][RTW89_FCC][89] = 48,
+ [0][1][2][1][RTW89_FCC][90] = 48,
+ [0][1][2][1][RTW89_FCC][92] = 48,
+ [0][1][2][1][RTW89_FCC][94] = 48,
+ [0][1][2][1][RTW89_FCC][96] = 48,
+ [0][1][2][1][RTW89_FCC][98] = 48,
+ [0][1][2][1][RTW89_FCC][100] = 48,
+ [0][1][2][1][RTW89_FCC][102] = 48,
+ [0][1][2][1][RTW89_FCC][104] = 48,
+ [0][1][2][1][RTW89_FCC][105] = 48,
+ [0][1][2][1][RTW89_FCC][107] = 48,
+ [0][1][2][1][RTW89_FCC][109] = 48,
+ [0][1][2][1][RTW89_FCC][111] = 127,
+ [0][1][2][1][RTW89_FCC][113] = 127,
+ [0][1][2][1][RTW89_FCC][115] = 127,
+ [0][1][2][1][RTW89_FCC][117] = 127,
+ [0][1][2][1][RTW89_FCC][119] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 72,
+ [1][0][2][0][RTW89_FCC][5] = 72,
+ [1][0][2][0][RTW89_FCC][9] = 72,
+ [1][0][2][0][RTW89_FCC][13] = 72,
+ [1][0][2][0][RTW89_FCC][16] = 72,
+ [1][0][2][0][RTW89_FCC][20] = 72,
+ [1][0][2][0][RTW89_FCC][24] = 72,
+ [1][0][2][0][RTW89_FCC][28] = 72,
+ [1][0][2][0][RTW89_FCC][31] = 72,
+ [1][0][2][0][RTW89_FCC][35] = 72,
+ [1][0][2][0][RTW89_FCC][39] = 72,
+ [1][0][2][0][RTW89_FCC][43] = 72,
+ [1][0][2][0][RTW89_FCC][46] = 72,
+ [1][0][2][0][RTW89_FCC][50] = 72,
+ [1][0][2][0][RTW89_FCC][54] = 72,
+ [1][0][2][0][RTW89_FCC][58] = 72,
+ [1][0][2][0][RTW89_FCC][61] = 72,
+ [1][0][2][0][RTW89_FCC][65] = 72,
+ [1][0][2][0][RTW89_FCC][69] = 72,
+ [1][0][2][0][RTW89_FCC][73] = 72,
+ [1][0][2][0][RTW89_FCC][76] = 72,
+ [1][0][2][0][RTW89_FCC][80] = 72,
+ [1][0][2][0][RTW89_FCC][84] = 72,
+ [1][0][2][0][RTW89_FCC][88] = 72,
+ [1][0][2][0][RTW89_FCC][91] = 72,
+ [1][0][2][0][RTW89_FCC][95] = 72,
+ [1][0][2][0][RTW89_FCC][99] = 72,
+ [1][0][2][0][RTW89_FCC][103] = 72,
+ [1][0][2][0][RTW89_FCC][106] = 72,
+ [1][0][2][0][RTW89_FCC][110] = 127,
+ [1][0][2][0][RTW89_FCC][114] = 127,
+ [1][0][2][0][RTW89_FCC][118] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 60,
+ [1][1][2][0][RTW89_FCC][5] = 60,
+ [1][1][2][0][RTW89_FCC][9] = 60,
+ [1][1][2][0][RTW89_FCC][13] = 60,
+ [1][1][2][0][RTW89_FCC][16] = 60,
+ [1][1][2][0][RTW89_FCC][20] = 60,
+ [1][1][2][0][RTW89_FCC][24] = 60,
+ [1][1][2][0][RTW89_FCC][28] = 60,
+ [1][1][2][0][RTW89_FCC][31] = 60,
+ [1][1][2][0][RTW89_FCC][35] = 60,
+ [1][1][2][0][RTW89_FCC][39] = 60,
+ [1][1][2][0][RTW89_FCC][43] = 60,
+ [1][1][2][0][RTW89_FCC][46] = 60,
+ [1][1][2][0][RTW89_FCC][50] = 60,
+ [1][1][2][0][RTW89_FCC][54] = 60,
+ [1][1][2][0][RTW89_FCC][58] = 60,
+ [1][1][2][0][RTW89_FCC][61] = 60,
+ [1][1][2][0][RTW89_FCC][65] = 60,
+ [1][1][2][0][RTW89_FCC][69] = 60,
+ [1][1][2][0][RTW89_FCC][73] = 60,
+ [1][1][2][0][RTW89_FCC][76] = 60,
+ [1][1][2][0][RTW89_FCC][80] = 60,
+ [1][1][2][0][RTW89_FCC][84] = 60,
+ [1][1][2][0][RTW89_FCC][88] = 60,
+ [1][1][2][0][RTW89_FCC][91] = 60,
+ [1][1][2][0][RTW89_FCC][95] = 60,
+ [1][1][2][0][RTW89_FCC][99] = 60,
+ [1][1][2][0][RTW89_FCC][103] = 60,
+ [1][1][2][0][RTW89_FCC][106] = 60,
+ [1][1][2][0][RTW89_FCC][110] = 127,
+ [1][1][2][0][RTW89_FCC][114] = 127,
+ [1][1][2][0][RTW89_FCC][118] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 48,
+ [1][1][2][1][RTW89_FCC][5] = 48,
+ [1][1][2][1][RTW89_FCC][9] = 48,
+ [1][1][2][1][RTW89_FCC][13] = 48,
+ [1][1][2][1][RTW89_FCC][16] = 48,
+ [1][1][2][1][RTW89_FCC][20] = 48,
+ [1][1][2][1][RTW89_FCC][24] = 48,
+ [1][1][2][1][RTW89_FCC][28] = 48,
+ [1][1][2][1][RTW89_FCC][31] = 48,
+ [1][1][2][1][RTW89_FCC][35] = 48,
+ [1][1][2][1][RTW89_FCC][39] = 48,
+ [1][1][2][1][RTW89_FCC][43] = 48,
+ [1][1][2][1][RTW89_FCC][46] = 48,
+ [1][1][2][1][RTW89_FCC][50] = 48,
+ [1][1][2][1][RTW89_FCC][54] = 48,
+ [1][1][2][1][RTW89_FCC][58] = 48,
+ [1][1][2][1][RTW89_FCC][61] = 48,
+ [1][1][2][1][RTW89_FCC][65] = 48,
+ [1][1][2][1][RTW89_FCC][69] = 48,
+ [1][1][2][1][RTW89_FCC][73] = 48,
+ [1][1][2][1][RTW89_FCC][76] = 48,
+ [1][1][2][1][RTW89_FCC][80] = 48,
+ [1][1][2][1][RTW89_FCC][84] = 48,
+ [1][1][2][1][RTW89_FCC][88] = 48,
+ [1][1][2][1][RTW89_FCC][91] = 48,
+ [1][1][2][1][RTW89_FCC][95] = 48,
+ [1][1][2][1][RTW89_FCC][99] = 48,
+ [1][1][2][1][RTW89_FCC][103] = 48,
+ [1][1][2][1][RTW89_FCC][106] = 48,
+ [1][1][2][1][RTW89_FCC][110] = 127,
+ [1][1][2][1][RTW89_FCC][114] = 127,
+ [1][1][2][1][RTW89_FCC][118] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 64,
+ [2][0][2][0][RTW89_FCC][11] = 64,
+ [2][0][2][0][RTW89_FCC][18] = 64,
+ [2][0][2][0][RTW89_FCC][26] = 64,
+ [2][0][2][0][RTW89_FCC][33] = 64,
+ [2][0][2][0][RTW89_FCC][41] = 64,
+ [2][0][2][0][RTW89_FCC][48] = 64,
+ [2][0][2][0][RTW89_FCC][56] = 64,
+ [2][0][2][0][RTW89_FCC][63] = 64,
+ [2][0][2][0][RTW89_FCC][71] = 64,
+ [2][0][2][0][RTW89_FCC][78] = 64,
+ [2][0][2][0][RTW89_FCC][86] = 64,
+ [2][0][2][0][RTW89_FCC][93] = 64,
+ [2][0][2][0][RTW89_FCC][101] = 64,
+ [2][0][2][0][RTW89_FCC][108] = 127,
+ [2][0][2][0][RTW89_FCC][116] = 127,
+ [2][1][2][0][RTW89_FCC][3] = 52,
+ [2][1][2][0][RTW89_FCC][11] = 52,
+ [2][1][2][0][RTW89_FCC][18] = 52,
+ [2][1][2][0][RTW89_FCC][26] = 52,
+ [2][1][2][0][RTW89_FCC][33] = 52,
+ [2][1][2][0][RTW89_FCC][41] = 52,
+ [2][1][2][0][RTW89_FCC][48] = 52,
+ [2][1][2][0][RTW89_FCC][56] = 52,
+ [2][1][2][0][RTW89_FCC][63] = 52,
+ [2][1][2][0][RTW89_FCC][71] = 52,
+ [2][1][2][0][RTW89_FCC][78] = 52,
+ [2][1][2][0][RTW89_FCC][86] = 52,
+ [2][1][2][0][RTW89_FCC][93] = 52,
+ [2][1][2][0][RTW89_FCC][101] = 52,
+ [2][1][2][0][RTW89_FCC][108] = 127,
+ [2][1][2][0][RTW89_FCC][116] = 127,
+ [2][1][2][1][RTW89_FCC][3] = 40,
+ [2][1][2][1][RTW89_FCC][11] = 40,
+ [2][1][2][1][RTW89_FCC][18] = 40,
+ [2][1][2][1][RTW89_FCC][26] = 40,
+ [2][1][2][1][RTW89_FCC][33] = 40,
+ [2][1][2][1][RTW89_FCC][41] = 40,
+ [2][1][2][1][RTW89_FCC][48] = 40,
+ [2][1][2][1][RTW89_FCC][56] = 40,
+ [2][1][2][1][RTW89_FCC][63] = 40,
+ [2][1][2][1][RTW89_FCC][71] = 40,
+ [2][1][2][1][RTW89_FCC][78] = 40,
+ [2][1][2][1][RTW89_FCC][86] = 40,
+ [2][1][2][1][RTW89_FCC][93] = 40,
+ [2][1][2][1][RTW89_FCC][101] = 40,
+ [2][1][2][1][RTW89_FCC][108] = 127,
+ [2][1][2][1][RTW89_FCC][116] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 56,
+ [3][0][2][0][RTW89_FCC][22] = 56,
+ [3][0][2][0][RTW89_FCC][37] = 56,
+ [3][0][2][0][RTW89_FCC][52] = 56,
+ [3][0][2][0][RTW89_FCC][67] = 56,
+ [3][0][2][0][RTW89_FCC][82] = 56,
+ [3][0][2][0][RTW89_FCC][97] = 56,
+ [3][0][2][0][RTW89_FCC][112] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 44,
+ [3][1][2][0][RTW89_FCC][22] = 44,
+ [3][1][2][0][RTW89_FCC][37] = 44,
+ [3][1][2][0][RTW89_FCC][52] = 44,
+ [3][1][2][0][RTW89_FCC][67] = 44,
+ [3][1][2][0][RTW89_FCC][82] = 44,
+ [3][1][2][0][RTW89_FCC][97] = 44,
+ [3][1][2][0][RTW89_FCC][112] = 127,
+ [3][1][2][1][RTW89_FCC][7] = 32,
+ [3][1][2][1][RTW89_FCC][22] = 32,
+ [3][1][2][1][RTW89_FCC][37] = 32,
+ [3][1][2][1][RTW89_FCC][52] = 32,
+ [3][1][2][1][RTW89_FCC][67] = 32,
+ [3][1][2][1][RTW89_FCC][82] = 32,
+ [3][1][2][1][RTW89_FCC][97] = 32,
+ [3][1][2][1][RTW89_FCC][112] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = 32,
+ [0][0][RTW89_WW][1] = 32,
+ [0][0][RTW89_WW][2] = 32,
+ [0][0][RTW89_WW][3] = 32,
+ [0][0][RTW89_WW][4] = 32,
+ [0][0][RTW89_WW][5] = 32,
+ [0][0][RTW89_WW][6] = 32,
+ [0][0][RTW89_WW][7] = 32,
+ [0][0][RTW89_WW][8] = 32,
+ [0][0][RTW89_WW][9] = 32,
+ [0][0][RTW89_WW][10] = 32,
+ [0][0][RTW89_WW][11] = 32,
+ [0][0][RTW89_WW][12] = 32,
+ [0][0][RTW89_WW][13] = 0,
+ [0][1][RTW89_WW][0] = 20,
+ [0][1][RTW89_WW][1] = 22,
+ [0][1][RTW89_WW][2] = 22,
+ [0][1][RTW89_WW][3] = 22,
+ [0][1][RTW89_WW][4] = 22,
+ [0][1][RTW89_WW][5] = 22,
+ [0][1][RTW89_WW][6] = 22,
+ [0][1][RTW89_WW][7] = 22,
+ [0][1][RTW89_WW][8] = 22,
+ [0][1][RTW89_WW][9] = 22,
+ [0][1][RTW89_WW][10] = 22,
+ [0][1][RTW89_WW][11] = 22,
+ [0][1][RTW89_WW][12] = 20,
+ [0][1][RTW89_WW][13] = 0,
+ [1][0][RTW89_WW][0] = 42,
+ [1][0][RTW89_WW][1] = 44,
+ [1][0][RTW89_WW][2] = 44,
+ [1][0][RTW89_WW][3] = 44,
+ [1][0][RTW89_WW][4] = 44,
+ [1][0][RTW89_WW][5] = 44,
+ [1][0][RTW89_WW][6] = 44,
+ [1][0][RTW89_WW][7] = 44,
+ [1][0][RTW89_WW][8] = 44,
+ [1][0][RTW89_WW][9] = 44,
+ [1][0][RTW89_WW][10] = 44,
+ [1][0][RTW89_WW][11] = 44,
+ [1][0][RTW89_WW][12] = 38,
+ [1][0][RTW89_WW][13] = 0,
+ [1][1][RTW89_WW][0] = 32,
+ [1][1][RTW89_WW][1] = 32,
+ [1][1][RTW89_WW][2] = 32,
+ [1][1][RTW89_WW][3] = 32,
+ [1][1][RTW89_WW][4] = 32,
+ [1][1][RTW89_WW][5] = 32,
+ [1][1][RTW89_WW][6] = 32,
+ [1][1][RTW89_WW][7] = 32,
+ [1][1][RTW89_WW][8] = 32,
+ [1][1][RTW89_WW][9] = 32,
+ [1][1][RTW89_WW][10] = 32,
+ [1][1][RTW89_WW][11] = 32,
+ [1][1][RTW89_WW][12] = 32,
+ [1][1][RTW89_WW][13] = 0,
+ [2][0][RTW89_WW][0] = 56,
+ [2][0][RTW89_WW][1] = 56,
+ [2][0][RTW89_WW][2] = 56,
+ [2][0][RTW89_WW][3] = 56,
+ [2][0][RTW89_WW][4] = 56,
+ [2][0][RTW89_WW][5] = 56,
+ [2][0][RTW89_WW][6] = 56,
+ [2][0][RTW89_WW][7] = 56,
+ [2][0][RTW89_WW][8] = 56,
+ [2][0][RTW89_WW][9] = 56,
+ [2][0][RTW89_WW][10] = 56,
+ [2][0][RTW89_WW][11] = 56,
+ [2][0][RTW89_WW][12] = 56,
+ [2][0][RTW89_WW][13] = 0,
+ [2][1][RTW89_WW][0] = 44,
+ [2][1][RTW89_WW][1] = 44,
+ [2][1][RTW89_WW][2] = 44,
+ [2][1][RTW89_WW][3] = 44,
+ [2][1][RTW89_WW][4] = 44,
+ [2][1][RTW89_WW][5] = 44,
+ [2][1][RTW89_WW][6] = 44,
+ [2][1][RTW89_WW][7] = 44,
+ [2][1][RTW89_WW][8] = 44,
+ [2][1][RTW89_WW][9] = 44,
+ [2][1][RTW89_WW][10] = 44,
+ [2][1][RTW89_WW][11] = 44,
+ [2][1][RTW89_WW][12] = 42,
+ [2][1][RTW89_WW][13] = 0,
+ [0][0][RTW89_FCC][0] = 68,
+ [0][0][RTW89_ETSI][0] = 36,
+ [0][0][RTW89_MKK][0] = 38,
+ [0][0][RTW89_IC][0] = 68,
+ [0][0][RTW89_ACMA][0] = 32,
+ [0][0][RTW89_FCC][1] = 68,
+ [0][0][RTW89_ETSI][1] = 40,
+ [0][0][RTW89_MKK][1] = 44,
+ [0][0][RTW89_IC][1] = 68,
+ [0][0][RTW89_ACMA][1] = 32,
+ [0][0][RTW89_FCC][2] = 72,
+ [0][0][RTW89_ETSI][2] = 40,
+ [0][0][RTW89_MKK][2] = 44,
+ [0][0][RTW89_IC][2] = 72,
+ [0][0][RTW89_ACMA][2] = 32,
+ [0][0][RTW89_FCC][3] = 76,
+ [0][0][RTW89_ETSI][3] = 40,
+ [0][0][RTW89_MKK][3] = 44,
+ [0][0][RTW89_IC][3] = 76,
+ [0][0][RTW89_ACMA][3] = 32,
+ [0][0][RTW89_FCC][4] = 76,
+ [0][0][RTW89_ETSI][4] = 40,
+ [0][0][RTW89_MKK][4] = 44,
+ [0][0][RTW89_IC][4] = 76,
+ [0][0][RTW89_ACMA][4] = 32,
+ [0][0][RTW89_FCC][5] = 84,
+ [0][0][RTW89_ETSI][5] = 40,
+ [0][0][RTW89_MKK][5] = 44,
+ [0][0][RTW89_IC][5] = 84,
+ [0][0][RTW89_ACMA][5] = 32,
+ [0][0][RTW89_FCC][6] = 74,
+ [0][0][RTW89_ETSI][6] = 40,
+ [0][0][RTW89_MKK][6] = 44,
+ [0][0][RTW89_IC][6] = 74,
+ [0][0][RTW89_ACMA][6] = 32,
+ [0][0][RTW89_FCC][7] = 74,
+ [0][0][RTW89_ETSI][7] = 40,
+ [0][0][RTW89_MKK][7] = 44,
+ [0][0][RTW89_IC][7] = 74,
+ [0][0][RTW89_ACMA][7] = 32,
+ [0][0][RTW89_FCC][8] = 70,
+ [0][0][RTW89_ETSI][8] = 40,
+ [0][0][RTW89_MKK][8] = 44,
+ [0][0][RTW89_IC][8] = 70,
+ [0][0][RTW89_ACMA][8] = 32,
+ [0][0][RTW89_FCC][9] = 66,
+ [0][0][RTW89_ETSI][9] = 40,
+ [0][0][RTW89_MKK][9] = 44,
+ [0][0][RTW89_IC][9] = 66,
+ [0][0][RTW89_ACMA][9] = 32,
+ [0][0][RTW89_FCC][10] = 66,
+ [0][0][RTW89_ETSI][10] = 40,
+ [0][0][RTW89_MKK][10] = 44,
+ [0][0][RTW89_IC][10] = 66,
+ [0][0][RTW89_ACMA][10] = 32,
+ [0][0][RTW89_FCC][11] = 56,
+ [0][0][RTW89_ETSI][11] = 40,
+ [0][0][RTW89_MKK][11] = 44,
+ [0][0][RTW89_IC][11] = 56,
+ [0][0][RTW89_ACMA][11] = 32,
+ [0][0][RTW89_FCC][12] = 32,
+ [0][0][RTW89_ETSI][12] = 36,
+ [0][0][RTW89_MKK][12] = 38,
+ [0][0][RTW89_IC][12] = 32,
+ [0][0][RTW89_ACMA][12] = 32,
+ [0][0][RTW89_FCC][13] = 127,
+ [0][0][RTW89_ETSI][13] = 127,
+ [0][0][RTW89_MKK][13] = 127,
+ [0][0][RTW89_IC][13] = 127,
+ [0][0][RTW89_ACMA][13] = 127,
+ [0][1][RTW89_FCC][0] = 62,
+ [0][1][RTW89_ETSI][0] = 24,
+ [0][1][RTW89_MKK][0] = 26,
+ [0][1][RTW89_IC][0] = 62,
+ [0][1][RTW89_ACMA][0] = 20,
+ [0][1][RTW89_FCC][1] = 62,
+ [0][1][RTW89_ETSI][1] = 26,
+ [0][1][RTW89_MKK][1] = 32,
+ [0][1][RTW89_IC][1] = 62,
+ [0][1][RTW89_ACMA][1] = 22,
+ [0][1][RTW89_FCC][2] = 66,
+ [0][1][RTW89_ETSI][2] = 26,
+ [0][1][RTW89_MKK][2] = 32,
+ [0][1][RTW89_IC][2] = 66,
+ [0][1][RTW89_ACMA][2] = 22,
+ [0][1][RTW89_FCC][3] = 70,
+ [0][1][RTW89_ETSI][3] = 26,
+ [0][1][RTW89_MKK][3] = 32,
+ [0][1][RTW89_IC][3] = 70,
+ [0][1][RTW89_ACMA][3] = 22,
+ [0][1][RTW89_FCC][4] = 74,
+ [0][1][RTW89_ETSI][4] = 26,
+ [0][1][RTW89_MKK][4] = 32,
+ [0][1][RTW89_IC][4] = 74,
+ [0][1][RTW89_ACMA][4] = 22,
+ [0][1][RTW89_FCC][5] = 74,
+ [0][1][RTW89_ETSI][5] = 26,
+ [0][1][RTW89_MKK][5] = 32,
+ [0][1][RTW89_IC][5] = 74,
+ [0][1][RTW89_ACMA][5] = 22,
+ [0][1][RTW89_FCC][6] = 72,
+ [0][1][RTW89_ETSI][6] = 26,
+ [0][1][RTW89_MKK][6] = 32,
+ [0][1][RTW89_IC][6] = 72,
+ [0][1][RTW89_ACMA][6] = 22,
+ [0][1][RTW89_FCC][7] = 68,
+ [0][1][RTW89_ETSI][7] = 26,
+ [0][1][RTW89_MKK][7] = 32,
+ [0][1][RTW89_IC][7] = 68,
+ [0][1][RTW89_ACMA][7] = 22,
+ [0][1][RTW89_FCC][8] = 64,
+ [0][1][RTW89_ETSI][8] = 26,
+ [0][1][RTW89_MKK][8] = 32,
+ [0][1][RTW89_IC][8] = 64,
+ [0][1][RTW89_ACMA][8] = 22,
+ [0][1][RTW89_FCC][9] = 60,
+ [0][1][RTW89_ETSI][9] = 26,
+ [0][1][RTW89_MKK][9] = 32,
+ [0][1][RTW89_IC][9] = 60,
+ [0][1][RTW89_ACMA][9] = 22,
+ [0][1][RTW89_FCC][10] = 60,
+ [0][1][RTW89_ETSI][10] = 26,
+ [0][1][RTW89_MKK][10] = 32,
+ [0][1][RTW89_IC][10] = 60,
+ [0][1][RTW89_ACMA][10] = 22,
+ [0][1][RTW89_FCC][11] = 52,
+ [0][1][RTW89_ETSI][11] = 26,
+ [0][1][RTW89_MKK][11] = 32,
+ [0][1][RTW89_IC][11] = 52,
+ [0][1][RTW89_ACMA][11] = 22,
+ [0][1][RTW89_FCC][12] = 30,
+ [0][1][RTW89_ETSI][12] = 22,
+ [0][1][RTW89_MKK][12] = 26,
+ [0][1][RTW89_IC][12] = 30,
+ [0][1][RTW89_ACMA][12] = 20,
+ [0][1][RTW89_FCC][13] = 127,
+ [0][1][RTW89_ETSI][13] = 127,
+ [0][1][RTW89_MKK][13] = 127,
+ [0][1][RTW89_IC][13] = 127,
+ [0][1][RTW89_ACMA][13] = 127,
+ [1][0][RTW89_FCC][0] = 78,
+ [1][0][RTW89_ETSI][0] = 48,
+ [1][0][RTW89_MKK][0] = 48,
+ [1][0][RTW89_IC][0] = 78,
+ [1][0][RTW89_ACMA][0] = 42,
+ [1][0][RTW89_FCC][1] = 78,
+ [1][0][RTW89_ETSI][1] = 48,
+ [1][0][RTW89_MKK][1] = 48,
+ [1][0][RTW89_IC][1] = 78,
+ [1][0][RTW89_ACMA][1] = 44,
+ [1][0][RTW89_FCC][2] = 82,
+ [1][0][RTW89_ETSI][2] = 48,
+ [1][0][RTW89_MKK][2] = 48,
+ [1][0][RTW89_IC][2] = 82,
+ [1][0][RTW89_ACMA][2] = 44,
+ [1][0][RTW89_FCC][3] = 84,
+ [1][0][RTW89_ETSI][3] = 48,
+ [1][0][RTW89_MKK][3] = 48,
+ [1][0][RTW89_IC][3] = 84,
+ [1][0][RTW89_ACMA][3] = 44,
+ [1][0][RTW89_FCC][4] = 84,
+ [1][0][RTW89_ETSI][4] = 48,
+ [1][0][RTW89_MKK][4] = 48,
+ [1][0][RTW89_IC][4] = 84,
+ [1][0][RTW89_ACMA][4] = 44,
+ [1][0][RTW89_FCC][5] = 84,
+ [1][0][RTW89_ETSI][5] = 48,
+ [1][0][RTW89_MKK][5] = 48,
+ [1][0][RTW89_IC][5] = 84,
+ [1][0][RTW89_ACMA][5] = 44,
+ [1][0][RTW89_FCC][6] = 78,
+ [1][0][RTW89_ETSI][6] = 46,
+ [1][0][RTW89_MKK][6] = 48,
+ [1][0][RTW89_IC][6] = 78,
+ [1][0][RTW89_ACMA][6] = 44,
+ [1][0][RTW89_FCC][7] = 78,
+ [1][0][RTW89_ETSI][7] = 48,
+ [1][0][RTW89_MKK][7] = 48,
+ [1][0][RTW89_IC][7] = 78,
+ [1][0][RTW89_ACMA][7] = 44,
+ [1][0][RTW89_FCC][8] = 78,
+ [1][0][RTW89_ETSI][8] = 48,
+ [1][0][RTW89_MKK][8] = 48,
+ [1][0][RTW89_IC][8] = 78,
+ [1][0][RTW89_ACMA][8] = 44,
+ [1][0][RTW89_FCC][9] = 74,
+ [1][0][RTW89_ETSI][9] = 48,
+ [1][0][RTW89_MKK][9] = 48,
+ [1][0][RTW89_IC][9] = 74,
+ [1][0][RTW89_ACMA][9] = 44,
+ [1][0][RTW89_FCC][10] = 74,
+ [1][0][RTW89_ETSI][10] = 48,
+ [1][0][RTW89_MKK][10] = 48,
+ [1][0][RTW89_IC][10] = 74,
+ [1][0][RTW89_ACMA][10] = 44,
+ [1][0][RTW89_FCC][11] = 72,
+ [1][0][RTW89_ETSI][11] = 48,
+ [1][0][RTW89_MKK][11] = 48,
+ [1][0][RTW89_IC][11] = 72,
+ [1][0][RTW89_ACMA][11] = 44,
+ [1][0][RTW89_FCC][12] = 38,
+ [1][0][RTW89_ETSI][12] = 48,
+ [1][0][RTW89_MKK][12] = 48,
+ [1][0][RTW89_IC][12] = 38,
+ [1][0][RTW89_ACMA][12] = 42,
+ [1][0][RTW89_FCC][13] = 127,
+ [1][0][RTW89_ETSI][13] = 127,
+ [1][0][RTW89_MKK][13] = 127,
+ [1][0][RTW89_IC][13] = 127,
+ [1][0][RTW89_ACMA][13] = 127,
+ [1][1][RTW89_FCC][0] = 66,
+ [1][1][RTW89_ETSI][0] = 34,
+ [1][1][RTW89_MKK][0] = 36,
+ [1][1][RTW89_IC][0] = 66,
+ [1][1][RTW89_ACMA][0] = 32,
+ [1][1][RTW89_FCC][1] = 66,
+ [1][1][RTW89_ETSI][1] = 36,
+ [1][1][RTW89_MKK][1] = 36,
+ [1][1][RTW89_IC][1] = 66,
+ [1][1][RTW89_ACMA][1] = 32,
+ [1][1][RTW89_FCC][2] = 70,
+ [1][1][RTW89_ETSI][2] = 36,
+ [1][1][RTW89_MKK][2] = 36,
+ [1][1][RTW89_IC][2] = 70,
+ [1][1][RTW89_ACMA][2] = 32,
+ [1][1][RTW89_FCC][3] = 74,
+ [1][1][RTW89_ETSI][3] = 36,
+ [1][1][RTW89_MKK][3] = 36,
+ [1][1][RTW89_IC][3] = 74,
+ [1][1][RTW89_ACMA][3] = 32,
+ [1][1][RTW89_FCC][4] = 74,
+ [1][1][RTW89_ETSI][4] = 36,
+ [1][1][RTW89_MKK][4] = 36,
+ [1][1][RTW89_IC][4] = 74,
+ [1][1][RTW89_ACMA][4] = 32,
+ [1][1][RTW89_FCC][5] = 74,
+ [1][1][RTW89_ETSI][5] = 36,
+ [1][1][RTW89_MKK][5] = 36,
+ [1][1][RTW89_IC][5] = 74,
+ [1][1][RTW89_ACMA][5] = 32,
+ [1][1][RTW89_FCC][6] = 74,
+ [1][1][RTW89_ETSI][6] = 36,
+ [1][1][RTW89_MKK][6] = 36,
+ [1][1][RTW89_IC][6] = 74,
+ [1][1][RTW89_ACMA][6] = 32,
+ [1][1][RTW89_FCC][7] = 74,
+ [1][1][RTW89_ETSI][7] = 36,
+ [1][1][RTW89_MKK][7] = 36,
+ [1][1][RTW89_IC][7] = 74,
+ [1][1][RTW89_ACMA][7] = 32,
+ [1][1][RTW89_FCC][8] = 70,
+ [1][1][RTW89_ETSI][8] = 36,
+ [1][1][RTW89_MKK][8] = 36,
+ [1][1][RTW89_IC][8] = 70,
+ [1][1][RTW89_ACMA][8] = 32,
+ [1][1][RTW89_FCC][9] = 66,
+ [1][1][RTW89_ETSI][9] = 36,
+ [1][1][RTW89_MKK][9] = 36,
+ [1][1][RTW89_IC][9] = 66,
+ [1][1][RTW89_ACMA][9] = 32,
+ [1][1][RTW89_FCC][10] = 66,
+ [1][1][RTW89_ETSI][10] = 36,
+ [1][1][RTW89_MKK][10] = 36,
+ [1][1][RTW89_IC][10] = 66,
+ [1][1][RTW89_ACMA][10] = 32,
+ [1][1][RTW89_FCC][11] = 48,
+ [1][1][RTW89_ETSI][11] = 36,
+ [1][1][RTW89_MKK][11] = 36,
+ [1][1][RTW89_IC][11] = 48,
+ [1][1][RTW89_ACMA][11] = 32,
+ [1][1][RTW89_FCC][12] = 32,
+ [1][1][RTW89_ETSI][12] = 36,
+ [1][1][RTW89_MKK][12] = 36,
+ [1][1][RTW89_IC][12] = 32,
+ [1][1][RTW89_ACMA][12] = 32,
+ [1][1][RTW89_FCC][13] = 127,
+ [1][1][RTW89_ETSI][13] = 127,
+ [1][1][RTW89_MKK][13] = 127,
+ [1][1][RTW89_IC][13] = 127,
+ [1][1][RTW89_ACMA][13] = 127,
+ [2][0][RTW89_FCC][0] = 78,
+ [2][0][RTW89_ETSI][0] = 60,
+ [2][0][RTW89_MKK][0] = 60,
+ [2][0][RTW89_IC][0] = 78,
+ [2][0][RTW89_ACMA][0] = 56,
+ [2][0][RTW89_FCC][1] = 78,
+ [2][0][RTW89_ETSI][1] = 60,
+ [2][0][RTW89_MKK][1] = 60,
+ [2][0][RTW89_IC][1] = 78,
+ [2][0][RTW89_ACMA][1] = 56,
+ [2][0][RTW89_FCC][2] = 80,
+ [2][0][RTW89_ETSI][2] = 60,
+ [2][0][RTW89_MKK][2] = 60,
+ [2][0][RTW89_IC][2] = 80,
+ [2][0][RTW89_ACMA][2] = 56,
+ [2][0][RTW89_FCC][3] = 80,
+ [2][0][RTW89_ETSI][3] = 60,
+ [2][0][RTW89_MKK][3] = 60,
+ [2][0][RTW89_IC][3] = 80,
+ [2][0][RTW89_ACMA][3] = 56,
+ [2][0][RTW89_FCC][4] = 80,
+ [2][0][RTW89_ETSI][4] = 60,
+ [2][0][RTW89_MKK][4] = 60,
+ [2][0][RTW89_IC][4] = 80,
+ [2][0][RTW89_ACMA][4] = 56,
+ [2][0][RTW89_FCC][5] = 84,
+ [2][0][RTW89_ETSI][5] = 60,
+ [2][0][RTW89_MKK][5] = 60,
+ [2][0][RTW89_IC][5] = 84,
+ [2][0][RTW89_ACMA][5] = 56,
+ [2][0][RTW89_FCC][6] = 76,
+ [2][0][RTW89_ETSI][6] = 58,
+ [2][0][RTW89_MKK][6] = 60,
+ [2][0][RTW89_IC][6] = 76,
+ [2][0][RTW89_ACMA][6] = 56,
+ [2][0][RTW89_FCC][7] = 76,
+ [2][0][RTW89_ETSI][7] = 60,
+ [2][0][RTW89_MKK][7] = 60,
+ [2][0][RTW89_IC][7] = 76,
+ [2][0][RTW89_ACMA][7] = 56,
+ [2][0][RTW89_FCC][8] = 76,
+ [2][0][RTW89_ETSI][8] = 60,
+ [2][0][RTW89_MKK][8] = 60,
+ [2][0][RTW89_IC][8] = 76,
+ [2][0][RTW89_ACMA][8] = 56,
+ [2][0][RTW89_FCC][9] = 74,
+ [2][0][RTW89_ETSI][9] = 60,
+ [2][0][RTW89_MKK][9] = 60,
+ [2][0][RTW89_IC][9] = 74,
+ [2][0][RTW89_ACMA][9] = 56,
+ [2][0][RTW89_FCC][10] = 74,
+ [2][0][RTW89_ETSI][10] = 60,
+ [2][0][RTW89_MKK][10] = 60,
+ [2][0][RTW89_IC][10] = 74,
+ [2][0][RTW89_ACMA][10] = 56,
+ [2][0][RTW89_FCC][11] = 66,
+ [2][0][RTW89_ETSI][11] = 60,
+ [2][0][RTW89_MKK][11] = 60,
+ [2][0][RTW89_IC][11] = 66,
+ [2][0][RTW89_ACMA][11] = 56,
+ [2][0][RTW89_FCC][12] = 56,
+ [2][0][RTW89_ETSI][12] = 60,
+ [2][0][RTW89_MKK][12] = 60,
+ [2][0][RTW89_IC][12] = 56,
+ [2][0][RTW89_ACMA][12] = 56,
+ [2][0][RTW89_FCC][13] = 127,
+ [2][0][RTW89_ETSI][13] = 127,
+ [2][0][RTW89_MKK][13] = 127,
+ [2][0][RTW89_IC][13] = 127,
+ [2][0][RTW89_ACMA][13] = 127,
+ [2][1][RTW89_FCC][0] = 70,
+ [2][1][RTW89_ETSI][0] = 48,
+ [2][1][RTW89_MKK][0] = 48,
+ [2][1][RTW89_IC][0] = 70,
+ [2][1][RTW89_ACMA][0] = 44,
+ [2][1][RTW89_FCC][1] = 70,
+ [2][1][RTW89_ETSI][1] = 48,
+ [2][1][RTW89_MKK][1] = 48,
+ [2][1][RTW89_IC][1] = 70,
+ [2][1][RTW89_ACMA][1] = 44,
+ [2][1][RTW89_FCC][2] = 74,
+ [2][1][RTW89_ETSI][2] = 48,
+ [2][1][RTW89_MKK][2] = 48,
+ [2][1][RTW89_IC][2] = 74,
+ [2][1][RTW89_ACMA][2] = 44,
+ [2][1][RTW89_FCC][3] = 78,
+ [2][1][RTW89_ETSI][3] = 48,
+ [2][1][RTW89_MKK][3] = 48,
+ [2][1][RTW89_IC][3] = 78,
+ [2][1][RTW89_ACMA][3] = 44,
+ [2][1][RTW89_FCC][4] = 80,
+ [2][1][RTW89_ETSI][4] = 48,
+ [2][1][RTW89_MKK][4] = 48,
+ [2][1][RTW89_IC][4] = 80,
+ [2][1][RTW89_ACMA][4] = 44,
+ [2][1][RTW89_FCC][5] = 80,
+ [2][1][RTW89_ETSI][5] = 48,
+ [2][1][RTW89_MKK][5] = 48,
+ [2][1][RTW89_IC][5] = 80,
+ [2][1][RTW89_ACMA][5] = 44,
+ [2][1][RTW89_FCC][6] = 78,
+ [2][1][RTW89_ETSI][6] = 46,
+ [2][1][RTW89_MKK][6] = 48,
+ [2][1][RTW89_IC][6] = 78,
+ [2][1][RTW89_ACMA][6] = 44,
+ [2][1][RTW89_FCC][7] = 78,
+ [2][1][RTW89_ETSI][7] = 48,
+ [2][1][RTW89_MKK][7] = 48,
+ [2][1][RTW89_IC][7] = 78,
+ [2][1][RTW89_ACMA][7] = 44,
+ [2][1][RTW89_FCC][8] = 74,
+ [2][1][RTW89_ETSI][8] = 48,
+ [2][1][RTW89_MKK][8] = 48,
+ [2][1][RTW89_IC][8] = 74,
+ [2][1][RTW89_ACMA][8] = 44,
+ [2][1][RTW89_FCC][9] = 70,
+ [2][1][RTW89_ETSI][9] = 48,
+ [2][1][RTW89_MKK][9] = 48,
+ [2][1][RTW89_IC][9] = 70,
+ [2][1][RTW89_ACMA][9] = 44,
+ [2][1][RTW89_FCC][10] = 70,
+ [2][1][RTW89_ETSI][10] = 48,
+ [2][1][RTW89_MKK][10] = 48,
+ [2][1][RTW89_IC][10] = 70,
+ [2][1][RTW89_ACMA][10] = 44,
+ [2][1][RTW89_FCC][11] = 60,
+ [2][1][RTW89_ETSI][11] = 48,
+ [2][1][RTW89_MKK][11] = 48,
+ [2][1][RTW89_IC][11] = 60,
+ [2][1][RTW89_ACMA][11] = 44,
+ [2][1][RTW89_FCC][12] = 44,
+ [2][1][RTW89_ETSI][12] = 46,
+ [2][1][RTW89_MKK][12] = 48,
+ [2][1][RTW89_IC][12] = 44,
+ [2][1][RTW89_ACMA][12] = 42,
+ [2][1][RTW89_FCC][13] = 127,
+ [2][1][RTW89_ETSI][13] = 127,
+ [2][1][RTW89_MKK][13] = 127,
+ [2][1][RTW89_IC][13] = 127,
+ [2][1][RTW89_ACMA][13] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = 24,
+ [0][0][RTW89_WW][2] = 24,
+ [0][0][RTW89_WW][4] = 24,
+ [0][0][RTW89_WW][6] = 24,
+ [0][0][RTW89_WW][8] = 24,
+ [0][0][RTW89_WW][10] = 24,
+ [0][0][RTW89_WW][12] = 24,
+ [0][0][RTW89_WW][14] = 24,
+ [0][0][RTW89_WW][15] = 24,
+ [0][0][RTW89_WW][17] = 24,
+ [0][0][RTW89_WW][19] = 24,
+ [0][0][RTW89_WW][21] = 24,
+ [0][0][RTW89_WW][23] = 24,
+ [0][0][RTW89_WW][25] = 32,
+ [0][0][RTW89_WW][27] = 32,
+ [0][0][RTW89_WW][29] = 32,
+ [0][0][RTW89_WW][31] = 24,
+ [0][0][RTW89_WW][33] = 24,
+ [0][0][RTW89_WW][35] = 24,
+ [0][0][RTW89_WW][37] = 44,
+ [0][0][RTW89_WW][38] = 30,
+ [0][0][RTW89_WW][40] = 30,
+ [0][0][RTW89_WW][42] = 30,
+ [0][0][RTW89_WW][44] = 30,
+ [0][0][RTW89_WW][46] = 30,
+ [0][0][RTW89_WW][48] = 32,
+ [0][0][RTW89_WW][50] = 32,
+ [0][0][RTW89_WW][52] = 32,
+ [0][1][RTW89_WW][0] = 0,
+ [0][1][RTW89_WW][2] = 4,
+ [0][1][RTW89_WW][4] = 0,
+ [0][1][RTW89_WW][6] = 0,
+ [0][1][RTW89_WW][8] = 12,
+ [0][1][RTW89_WW][10] = 12,
+ [0][1][RTW89_WW][12] = 12,
+ [0][1][RTW89_WW][14] = 12,
+ [0][1][RTW89_WW][15] = 12,
+ [0][1][RTW89_WW][17] = 12,
+ [0][1][RTW89_WW][19] = 12,
+ [0][1][RTW89_WW][21] = 12,
+ [0][1][RTW89_WW][23] = 12,
+ [0][1][RTW89_WW][25] = 20,
+ [0][1][RTW89_WW][27] = 18,
+ [0][1][RTW89_WW][29] = 18,
+ [0][1][RTW89_WW][31] = 12,
+ [0][1][RTW89_WW][33] = 12,
+ [0][1][RTW89_WW][35] = 12,
+ [0][1][RTW89_WW][37] = 34,
+ [0][1][RTW89_WW][38] = 18,
+ [0][1][RTW89_WW][40] = 18,
+ [0][1][RTW89_WW][42] = 18,
+ [0][1][RTW89_WW][44] = 18,
+ [0][1][RTW89_WW][46] = 18,
+ [0][1][RTW89_WW][48] = 20,
+ [0][1][RTW89_WW][50] = 20,
+ [0][1][RTW89_WW][52] = 20,
+ [1][0][RTW89_WW][0] = 34,
+ [1][0][RTW89_WW][2] = 34,
+ [1][0][RTW89_WW][4] = 34,
+ [1][0][RTW89_WW][6] = 34,
+ [1][0][RTW89_WW][8] = 34,
+ [1][0][RTW89_WW][10] = 34,
+ [1][0][RTW89_WW][12] = 34,
+ [1][0][RTW89_WW][14] = 34,
+ [1][0][RTW89_WW][15] = 34,
+ [1][0][RTW89_WW][17] = 34,
+ [1][0][RTW89_WW][19] = 34,
+ [1][0][RTW89_WW][21] = 34,
+ [1][0][RTW89_WW][23] = 34,
+ [1][0][RTW89_WW][25] = 42,
+ [1][0][RTW89_WW][27] = 44,
+ [1][0][RTW89_WW][29] = 44,
+ [1][0][RTW89_WW][31] = 34,
+ [1][0][RTW89_WW][33] = 34,
+ [1][0][RTW89_WW][35] = 34,
+ [1][0][RTW89_WW][37] = 52,
+ [1][0][RTW89_WW][38] = 30,
+ [1][0][RTW89_WW][40] = 30,
+ [1][0][RTW89_WW][42] = 30,
+ [1][0][RTW89_WW][44] = 30,
+ [1][0][RTW89_WW][46] = 30,
+ [1][0][RTW89_WW][48] = 44,
+ [1][0][RTW89_WW][50] = 44,
+ [1][0][RTW89_WW][52] = 44,
+ [1][1][RTW89_WW][0] = 10,
+ [1][1][RTW89_WW][2] = 14,
+ [1][1][RTW89_WW][4] = 10,
+ [1][1][RTW89_WW][6] = 10,
+ [1][1][RTW89_WW][8] = 20,
+ [1][1][RTW89_WW][10] = 20,
+ [1][1][RTW89_WW][12] = 22,
+ [1][1][RTW89_WW][14] = 22,
+ [1][1][RTW89_WW][15] = 22,
+ [1][1][RTW89_WW][17] = 22,
+ [1][1][RTW89_WW][19] = 22,
+ [1][1][RTW89_WW][21] = 22,
+ [1][1][RTW89_WW][23] = 22,
+ [1][1][RTW89_WW][25] = 30,
+ [1][1][RTW89_WW][27] = 32,
+ [1][1][RTW89_WW][29] = 32,
+ [1][1][RTW89_WW][31] = 22,
+ [1][1][RTW89_WW][33] = 22,
+ [1][1][RTW89_WW][35] = 22,
+ [1][1][RTW89_WW][37] = 42,
+ [1][1][RTW89_WW][38] = 18,
+ [1][1][RTW89_WW][40] = 18,
+ [1][1][RTW89_WW][42] = 18,
+ [1][1][RTW89_WW][44] = 18,
+ [1][1][RTW89_WW][46] = 18,
+ [1][1][RTW89_WW][48] = 32,
+ [1][1][RTW89_WW][50] = 32,
+ [1][1][RTW89_WW][52] = 32,
+ [2][0][RTW89_WW][0] = 46,
+ [2][0][RTW89_WW][2] = 46,
+ [2][0][RTW89_WW][4] = 46,
+ [2][0][RTW89_WW][6] = 46,
+ [2][0][RTW89_WW][8] = 48,
+ [2][0][RTW89_WW][10] = 48,
+ [2][0][RTW89_WW][12] = 46,
+ [2][0][RTW89_WW][14] = 46,
+ [2][0][RTW89_WW][15] = 48,
+ [2][0][RTW89_WW][17] = 48,
+ [2][0][RTW89_WW][19] = 48,
+ [2][0][RTW89_WW][21] = 48,
+ [2][0][RTW89_WW][23] = 48,
+ [2][0][RTW89_WW][25] = 54,
+ [2][0][RTW89_WW][27] = 54,
+ [2][0][RTW89_WW][29] = 54,
+ [2][0][RTW89_WW][31] = 48,
+ [2][0][RTW89_WW][33] = 48,
+ [2][0][RTW89_WW][35] = 48,
+ [2][0][RTW89_WW][37] = 66,
+ [2][0][RTW89_WW][38] = 30,
+ [2][0][RTW89_WW][40] = 30,
+ [2][0][RTW89_WW][42] = 30,
+ [2][0][RTW89_WW][44] = 30,
+ [2][0][RTW89_WW][46] = 30,
+ [2][0][RTW89_WW][48] = 56,
+ [2][0][RTW89_WW][50] = 56,
+ [2][0][RTW89_WW][52] = 56,
+ [2][1][RTW89_WW][0] = 20,
+ [2][1][RTW89_WW][2] = 18,
+ [2][1][RTW89_WW][4] = 22,
+ [2][1][RTW89_WW][6] = 22,
+ [2][1][RTW89_WW][8] = 34,
+ [2][1][RTW89_WW][10] = 34,
+ [2][1][RTW89_WW][12] = 36,
+ [2][1][RTW89_WW][14] = 36,
+ [2][1][RTW89_WW][15] = 36,
+ [2][1][RTW89_WW][17] = 36,
+ [2][1][RTW89_WW][19] = 36,
+ [2][1][RTW89_WW][21] = 36,
+ [2][1][RTW89_WW][23] = 36,
+ [2][1][RTW89_WW][25] = 42,
+ [2][1][RTW89_WW][27] = 42,
+ [2][1][RTW89_WW][29] = 42,
+ [2][1][RTW89_WW][31] = 36,
+ [2][1][RTW89_WW][33] = 36,
+ [2][1][RTW89_WW][35] = 36,
+ [2][1][RTW89_WW][37] = 50,
+ [2][1][RTW89_WW][38] = 18,
+ [2][1][RTW89_WW][40] = 18,
+ [2][1][RTW89_WW][42] = 18,
+ [2][1][RTW89_WW][44] = 18,
+ [2][1][RTW89_WW][46] = 18,
+ [2][1][RTW89_WW][48] = 44,
+ [2][1][RTW89_WW][50] = 44,
+ [2][1][RTW89_WW][52] = 44,
+ [0][0][RTW89_FCC][0] = 52,
+ [0][0][RTW89_ETSI][0] = 32,
+ [0][0][RTW89_MKK][0] = 26,
+ [0][0][RTW89_IC][0] = 24,
+ [0][0][RTW89_ACMA][0] = 24,
+ [0][0][RTW89_FCC][2] = 52,
+ [0][0][RTW89_ETSI][2] = 32,
+ [0][0][RTW89_MKK][2] = 26,
+ [0][0][RTW89_IC][2] = 24,
+ [0][0][RTW89_ACMA][2] = 24,
+ [0][0][RTW89_FCC][4] = 52,
+ [0][0][RTW89_ETSI][4] = 32,
+ [0][0][RTW89_MKK][4] = 26,
+ [0][0][RTW89_IC][4] = 24,
+ [0][0][RTW89_ACMA][4] = 24,
+ [0][0][RTW89_FCC][6] = 52,
+ [0][0][RTW89_ETSI][6] = 32,
+ [0][0][RTW89_MKK][6] = 26,
+ [0][0][RTW89_IC][6] = 24,
+ [0][0][RTW89_ACMA][6] = 24,
+ [0][0][RTW89_FCC][8] = 52,
+ [0][0][RTW89_ETSI][8] = 30,
+ [0][0][RTW89_MKK][8] = 26,
+ [0][0][RTW89_IC][8] = 52,
+ [0][0][RTW89_ACMA][8] = 24,
+ [0][0][RTW89_FCC][10] = 52,
+ [0][0][RTW89_ETSI][10] = 30,
+ [0][0][RTW89_MKK][10] = 26,
+ [0][0][RTW89_IC][10] = 52,
+ [0][0][RTW89_ACMA][10] = 24,
+ [0][0][RTW89_FCC][12] = 52,
+ [0][0][RTW89_ETSI][12] = 30,
+ [0][0][RTW89_MKK][12] = 24,
+ [0][0][RTW89_IC][12] = 52,
+ [0][0][RTW89_ACMA][12] = 24,
+ [0][0][RTW89_FCC][14] = 52,
+ [0][0][RTW89_ETSI][14] = 30,
+ [0][0][RTW89_MKK][14] = 24,
+ [0][0][RTW89_IC][14] = 52,
+ [0][0][RTW89_ACMA][14] = 24,
+ [0][0][RTW89_FCC][15] = 52,
+ [0][0][RTW89_ETSI][15] = 32,
+ [0][0][RTW89_MKK][15] = 46,
+ [0][0][RTW89_IC][15] = 52,
+ [0][0][RTW89_ACMA][15] = 24,
+ [0][0][RTW89_FCC][17] = 52,
+ [0][0][RTW89_ETSI][17] = 32,
+ [0][0][RTW89_MKK][17] = 48,
+ [0][0][RTW89_IC][17] = 52,
+ [0][0][RTW89_ACMA][17] = 24,
+ [0][0][RTW89_FCC][19] = 52,
+ [0][0][RTW89_ETSI][19] = 32,
+ [0][0][RTW89_MKK][19] = 48,
+ [0][0][RTW89_IC][19] = 52,
+ [0][0][RTW89_ACMA][19] = 24,
+ [0][0][RTW89_FCC][21] = 52,
+ [0][0][RTW89_ETSI][21] = 32,
+ [0][0][RTW89_MKK][21] = 48,
+ [0][0][RTW89_IC][21] = 52,
+ [0][0][RTW89_ACMA][21] = 24,
+ [0][0][RTW89_FCC][23] = 52,
+ [0][0][RTW89_ETSI][23] = 32,
+ [0][0][RTW89_MKK][23] = 48,
+ [0][0][RTW89_IC][23] = 52,
+ [0][0][RTW89_ACMA][23] = 24,
+ [0][0][RTW89_FCC][25] = 52,
+ [0][0][RTW89_ETSI][25] = 32,
+ [0][0][RTW89_MKK][25] = 48,
+ [0][0][RTW89_IC][25] = 127,
+ [0][0][RTW89_ACMA][25] = 127,
+ [0][0][RTW89_FCC][27] = 52,
+ [0][0][RTW89_ETSI][27] = 32,
+ [0][0][RTW89_MKK][27] = 48,
+ [0][0][RTW89_IC][27] = 127,
+ [0][0][RTW89_ACMA][27] = 127,
+ [0][0][RTW89_FCC][29] = 52,
+ [0][0][RTW89_ETSI][29] = 32,
+ [0][0][RTW89_MKK][29] = 48,
+ [0][0][RTW89_IC][29] = 127,
+ [0][0][RTW89_ACMA][29] = 127,
+ [0][0][RTW89_FCC][31] = 52,
+ [0][0][RTW89_ETSI][31] = 32,
+ [0][0][RTW89_MKK][31] = 48,
+ [0][0][RTW89_IC][31] = 52,
+ [0][0][RTW89_ACMA][31] = 24,
+ [0][0][RTW89_FCC][33] = 52,
+ [0][0][RTW89_ETSI][33] = 32,
+ [0][0][RTW89_MKK][33] = 48,
+ [0][0][RTW89_IC][33] = 52,
+ [0][0][RTW89_ACMA][33] = 24,
+ [0][0][RTW89_FCC][35] = 52,
+ [0][0][RTW89_ETSI][35] = 32,
+ [0][0][RTW89_MKK][35] = 48,
+ [0][0][RTW89_IC][35] = 52,
+ [0][0][RTW89_ACMA][35] = 24,
+ [0][0][RTW89_FCC][37] = 52,
+ [0][0][RTW89_ETSI][37] = 127,
+ [0][0][RTW89_MKK][37] = 44,
+ [0][0][RTW89_IC][37] = 52,
+ [0][0][RTW89_ACMA][37] = 52,
+ [0][0][RTW89_FCC][38] = 84,
+ [0][0][RTW89_ETSI][38] = 30,
+ [0][0][RTW89_MKK][38] = 127,
+ [0][0][RTW89_IC][38] = 84,
+ [0][0][RTW89_ACMA][38] = 84,
+ [0][0][RTW89_FCC][40] = 84,
+ [0][0][RTW89_ETSI][40] = 30,
+ [0][0][RTW89_MKK][40] = 127,
+ [0][0][RTW89_IC][40] = 84,
+ [0][0][RTW89_ACMA][40] = 84,
+ [0][0][RTW89_FCC][42] = 84,
+ [0][0][RTW89_ETSI][42] = 30,
+ [0][0][RTW89_MKK][42] = 127,
+ [0][0][RTW89_IC][42] = 84,
+ [0][0][RTW89_ACMA][42] = 84,
+ [0][0][RTW89_FCC][44] = 84,
+ [0][0][RTW89_ETSI][44] = 30,
+ [0][0][RTW89_MKK][44] = 127,
+ [0][0][RTW89_IC][44] = 84,
+ [0][0][RTW89_ACMA][44] = 84,
+ [0][0][RTW89_FCC][46] = 84,
+ [0][0][RTW89_ETSI][46] = 30,
+ [0][0][RTW89_MKK][46] = 127,
+ [0][0][RTW89_IC][46] = 84,
+ [0][0][RTW89_ACMA][46] = 84,
+ [0][0][RTW89_FCC][48] = 32,
+ [0][0][RTW89_ETSI][48] = 127,
+ [0][0][RTW89_MKK][48] = 127,
+ [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_ACMA][48] = 127,
+ [0][0][RTW89_FCC][50] = 32,
+ [0][0][RTW89_ETSI][50] = 127,
+ [0][0][RTW89_MKK][50] = 127,
+ [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_ACMA][50] = 127,
+ [0][0][RTW89_FCC][52] = 32,
+ [0][0][RTW89_ETSI][52] = 127,
+ [0][0][RTW89_MKK][52] = 127,
+ [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_ACMA][52] = 127,
+ [0][1][RTW89_FCC][0] = 34,
+ [0][1][RTW89_ETSI][0] = 20,
+ [0][1][RTW89_MKK][0] = 12,
+ [0][1][RTW89_IC][0] = 0,
+ [0][1][RTW89_ACMA][0] = 12,
+ [0][1][RTW89_FCC][2] = 38,
+ [0][1][RTW89_ETSI][2] = 20,
+ [0][1][RTW89_MKK][2] = 12,
+ [0][1][RTW89_IC][2] = 4,
+ [0][1][RTW89_ACMA][2] = 12,
+ [0][1][RTW89_FCC][4] = 34,
+ [0][1][RTW89_ETSI][4] = 20,
+ [0][1][RTW89_MKK][4] = 14,
+ [0][1][RTW89_IC][4] = 0,
+ [0][1][RTW89_ACMA][4] = 12,
+ [0][1][RTW89_FCC][6] = 34,
+ [0][1][RTW89_ETSI][6] = 20,
+ [0][1][RTW89_MKK][6] = 14,
+ [0][1][RTW89_IC][6] = 0,
+ [0][1][RTW89_ACMA][6] = 12,
+ [0][1][RTW89_FCC][8] = 34,
+ [0][1][RTW89_ETSI][8] = 18,
+ [0][1][RTW89_MKK][8] = 14,
+ [0][1][RTW89_IC][8] = 34,
+ [0][1][RTW89_ACMA][8] = 12,
+ [0][1][RTW89_FCC][10] = 34,
+ [0][1][RTW89_ETSI][10] = 18,
+ [0][1][RTW89_MKK][10] = 14,
+ [0][1][RTW89_IC][10] = 34,
+ [0][1][RTW89_ACMA][10] = 12,
+ [0][1][RTW89_FCC][12] = 38,
+ [0][1][RTW89_ETSI][12] = 18,
+ [0][1][RTW89_MKK][12] = 12,
+ [0][1][RTW89_IC][12] = 38,
+ [0][1][RTW89_ACMA][12] = 12,
+ [0][1][RTW89_FCC][14] = 34,
+ [0][1][RTW89_ETSI][14] = 18,
+ [0][1][RTW89_MKK][14] = 12,
+ [0][1][RTW89_IC][14] = 34,
+ [0][1][RTW89_ACMA][14] = 12,
+ [0][1][RTW89_FCC][15] = 34,
+ [0][1][RTW89_ETSI][15] = 20,
+ [0][1][RTW89_MKK][15] = 32,
+ [0][1][RTW89_IC][15] = 34,
+ [0][1][RTW89_ACMA][15] = 12,
+ [0][1][RTW89_FCC][17] = 34,
+ [0][1][RTW89_ETSI][17] = 20,
+ [0][1][RTW89_MKK][17] = 34,
+ [0][1][RTW89_IC][17] = 34,
+ [0][1][RTW89_ACMA][17] = 12,
+ [0][1][RTW89_FCC][19] = 38,
+ [0][1][RTW89_ETSI][19] = 20,
+ [0][1][RTW89_MKK][19] = 34,
+ [0][1][RTW89_IC][19] = 38,
+ [0][1][RTW89_ACMA][19] = 12,
+ [0][1][RTW89_FCC][21] = 38,
+ [0][1][RTW89_ETSI][21] = 20,
+ [0][1][RTW89_MKK][21] = 34,
+ [0][1][RTW89_IC][21] = 38,
+ [0][1][RTW89_ACMA][21] = 12,
+ [0][1][RTW89_FCC][23] = 38,
+ [0][1][RTW89_ETSI][23] = 20,
+ [0][1][RTW89_MKK][23] = 34,
+ [0][1][RTW89_IC][23] = 38,
+ [0][1][RTW89_ACMA][23] = 12,
+ [0][1][RTW89_FCC][25] = 38,
+ [0][1][RTW89_ETSI][25] = 20,
+ [0][1][RTW89_MKK][25] = 34,
+ [0][1][RTW89_IC][25] = 127,
+ [0][1][RTW89_ACMA][25] = 127,
+ [0][1][RTW89_FCC][27] = 38,
+ [0][1][RTW89_ETSI][27] = 18,
+ [0][1][RTW89_MKK][27] = 34,
+ [0][1][RTW89_IC][27] = 127,
+ [0][1][RTW89_ACMA][27] = 127,
+ [0][1][RTW89_FCC][29] = 38,
+ [0][1][RTW89_ETSI][29] = 18,
+ [0][1][RTW89_MKK][29] = 34,
+ [0][1][RTW89_IC][29] = 127,
+ [0][1][RTW89_ACMA][29] = 127,
+ [0][1][RTW89_FCC][31] = 38,
+ [0][1][RTW89_ETSI][31] = 18,
+ [0][1][RTW89_MKK][31] = 34,
+ [0][1][RTW89_IC][31] = 34,
+ [0][1][RTW89_ACMA][31] = 12,
+ [0][1][RTW89_FCC][33] = 34,
+ [0][1][RTW89_ETSI][33] = 18,
+ [0][1][RTW89_MKK][33] = 34,
+ [0][1][RTW89_IC][33] = 34,
+ [0][1][RTW89_ACMA][33] = 12,
+ [0][1][RTW89_FCC][35] = 34,
+ [0][1][RTW89_ETSI][35] = 18,
+ [0][1][RTW89_MKK][35] = 34,
+ [0][1][RTW89_IC][35] = 34,
+ [0][1][RTW89_ACMA][35] = 12,
+ [0][1][RTW89_FCC][37] = 38,
+ [0][1][RTW89_ETSI][37] = 127,
+ [0][1][RTW89_MKK][37] = 34,
+ [0][1][RTW89_IC][37] = 38,
+ [0][1][RTW89_ACMA][37] = 38,
+ [0][1][RTW89_FCC][38] = 82,
+ [0][1][RTW89_ETSI][38] = 18,
+ [0][1][RTW89_MKK][38] = 127,
+ [0][1][RTW89_IC][38] = 82,
+ [0][1][RTW89_ACMA][38] = 84,
+ [0][1][RTW89_FCC][40] = 82,
+ [0][1][RTW89_ETSI][40] = 18,
+ [0][1][RTW89_MKK][40] = 127,
+ [0][1][RTW89_IC][40] = 82,
+ [0][1][RTW89_ACMA][40] = 84,
+ [0][1][RTW89_FCC][42] = 82,
+ [0][1][RTW89_ETSI][42] = 18,
+ [0][1][RTW89_MKK][42] = 127,
+ [0][1][RTW89_IC][42] = 82,
+ [0][1][RTW89_ACMA][42] = 84,
+ [0][1][RTW89_FCC][44] = 82,
+ [0][1][RTW89_ETSI][44] = 18,
+ [0][1][RTW89_MKK][44] = 127,
+ [0][1][RTW89_IC][44] = 82,
+ [0][1][RTW89_ACMA][44] = 84,
+ [0][1][RTW89_FCC][46] = 82,
+ [0][1][RTW89_ETSI][46] = 18,
+ [0][1][RTW89_MKK][46] = 127,
+ [0][1][RTW89_IC][46] = 82,
+ [0][1][RTW89_ACMA][46] = 84,
+ [0][1][RTW89_FCC][48] = 20,
+ [0][1][RTW89_ETSI][48] = 127,
+ [0][1][RTW89_MKK][48] = 127,
+ [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_ACMA][48] = 127,
+ [0][1][RTW89_FCC][50] = 20,
+ [0][1][RTW89_ETSI][50] = 127,
+ [0][1][RTW89_MKK][50] = 127,
+ [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_ACMA][50] = 127,
+ [0][1][RTW89_FCC][52] = 20,
+ [0][1][RTW89_ETSI][52] = 127,
+ [0][1][RTW89_MKK][52] = 127,
+ [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_ACMA][52] = 127,
+ [1][0][RTW89_FCC][0] = 62,
+ [1][0][RTW89_ETSI][0] = 42,
+ [1][0][RTW89_MKK][0] = 36,
+ [1][0][RTW89_IC][0] = 36,
+ [1][0][RTW89_ACMA][0] = 34,
+ [1][0][RTW89_FCC][2] = 62,
+ [1][0][RTW89_ETSI][2] = 42,
+ [1][0][RTW89_MKK][2] = 36,
+ [1][0][RTW89_IC][2] = 36,
+ [1][0][RTW89_ACMA][2] = 34,
+ [1][0][RTW89_FCC][4] = 62,
+ [1][0][RTW89_ETSI][4] = 42,
+ [1][0][RTW89_MKK][4] = 34,
+ [1][0][RTW89_IC][4] = 36,
+ [1][0][RTW89_ACMA][4] = 34,
+ [1][0][RTW89_FCC][6] = 62,
+ [1][0][RTW89_ETSI][6] = 42,
+ [1][0][RTW89_MKK][6] = 34,
+ [1][0][RTW89_IC][6] = 36,
+ [1][0][RTW89_ACMA][6] = 34,
+ [1][0][RTW89_FCC][8] = 62,
+ [1][0][RTW89_ETSI][8] = 42,
+ [1][0][RTW89_MKK][8] = 36,
+ [1][0][RTW89_IC][8] = 62,
+ [1][0][RTW89_ACMA][8] = 34,
+ [1][0][RTW89_FCC][10] = 62,
+ [1][0][RTW89_ETSI][10] = 42,
+ [1][0][RTW89_MKK][10] = 36,
+ [1][0][RTW89_IC][10] = 62,
+ [1][0][RTW89_ACMA][10] = 34,
+ [1][0][RTW89_FCC][12] = 64,
+ [1][0][RTW89_ETSI][12] = 42,
+ [1][0][RTW89_MKK][12] = 36,
+ [1][0][RTW89_IC][12] = 64,
+ [1][0][RTW89_ACMA][12] = 34,
+ [1][0][RTW89_FCC][14] = 62,
+ [1][0][RTW89_ETSI][14] = 42,
+ [1][0][RTW89_MKK][14] = 36,
+ [1][0][RTW89_IC][14] = 62,
+ [1][0][RTW89_ACMA][14] = 34,
+ [1][0][RTW89_FCC][15] = 62,
+ [1][0][RTW89_ETSI][15] = 42,
+ [1][0][RTW89_MKK][15] = 54,
+ [1][0][RTW89_IC][15] = 62,
+ [1][0][RTW89_ACMA][15] = 34,
+ [1][0][RTW89_FCC][17] = 62,
+ [1][0][RTW89_ETSI][17] = 42,
+ [1][0][RTW89_MKK][17] = 58,
+ [1][0][RTW89_IC][17] = 62,
+ [1][0][RTW89_ACMA][17] = 34,
+ [1][0][RTW89_FCC][19] = 62,
+ [1][0][RTW89_ETSI][19] = 42,
+ [1][0][RTW89_MKK][19] = 58,
+ [1][0][RTW89_IC][19] = 62,
+ [1][0][RTW89_ACMA][19] = 34,
+ [1][0][RTW89_FCC][21] = 62,
+ [1][0][RTW89_ETSI][21] = 42,
+ [1][0][RTW89_MKK][21] = 58,
+ [1][0][RTW89_IC][21] = 62,
+ [1][0][RTW89_ACMA][21] = 34,
+ [1][0][RTW89_FCC][23] = 62,
+ [1][0][RTW89_ETSI][23] = 42,
+ [1][0][RTW89_MKK][23] = 58,
+ [1][0][RTW89_IC][23] = 62,
+ [1][0][RTW89_ACMA][23] = 34,
+ [1][0][RTW89_FCC][25] = 62,
+ [1][0][RTW89_ETSI][25] = 42,
+ [1][0][RTW89_MKK][25] = 58,
+ [1][0][RTW89_IC][25] = 127,
+ [1][0][RTW89_ACMA][25] = 127,
+ [1][0][RTW89_FCC][27] = 62,
+ [1][0][RTW89_ETSI][27] = 44,
+ [1][0][RTW89_MKK][27] = 58,
+ [1][0][RTW89_IC][27] = 127,
+ [1][0][RTW89_ACMA][27] = 127,
+ [1][0][RTW89_FCC][29] = 62,
+ [1][0][RTW89_ETSI][29] = 44,
+ [1][0][RTW89_MKK][29] = 58,
+ [1][0][RTW89_IC][29] = 127,
+ [1][0][RTW89_ACMA][29] = 127,
+ [1][0][RTW89_FCC][31] = 62,
+ [1][0][RTW89_ETSI][31] = 44,
+ [1][0][RTW89_MKK][31] = 58,
+ [1][0][RTW89_IC][31] = 62,
+ [1][0][RTW89_ACMA][31] = 34,
+ [1][0][RTW89_FCC][33] = 62,
+ [1][0][RTW89_ETSI][33] = 44,
+ [1][0][RTW89_MKK][33] = 58,
+ [1][0][RTW89_IC][33] = 62,
+ [1][0][RTW89_ACMA][33] = 34,
+ [1][0][RTW89_FCC][35] = 62,
+ [1][0][RTW89_ETSI][35] = 44,
+ [1][0][RTW89_MKK][35] = 58,
+ [1][0][RTW89_IC][35] = 62,
+ [1][0][RTW89_ACMA][35] = 34,
+ [1][0][RTW89_FCC][37] = 64,
+ [1][0][RTW89_ETSI][37] = 127,
+ [1][0][RTW89_MKK][37] = 52,
+ [1][0][RTW89_IC][37] = 64,
+ [1][0][RTW89_ACMA][37] = 64,
+ [1][0][RTW89_FCC][38] = 84,
+ [1][0][RTW89_ETSI][38] = 30,
+ [1][0][RTW89_MKK][38] = 127,
+ [1][0][RTW89_IC][38] = 84,
+ [1][0][RTW89_ACMA][38] = 84,
+ [1][0][RTW89_FCC][40] = 84,
+ [1][0][RTW89_ETSI][40] = 30,
+ [1][0][RTW89_MKK][40] = 127,
+ [1][0][RTW89_IC][40] = 84,
+ [1][0][RTW89_ACMA][40] = 84,
+ [1][0][RTW89_FCC][42] = 84,
+ [1][0][RTW89_ETSI][42] = 30,
+ [1][0][RTW89_MKK][42] = 127,
+ [1][0][RTW89_IC][42] = 84,
+ [1][0][RTW89_ACMA][42] = 84,
+ [1][0][RTW89_FCC][44] = 84,
+ [1][0][RTW89_ETSI][44] = 30,
+ [1][0][RTW89_MKK][44] = 127,
+ [1][0][RTW89_IC][44] = 84,
+ [1][0][RTW89_ACMA][44] = 84,
+ [1][0][RTW89_FCC][46] = 84,
+ [1][0][RTW89_ETSI][46] = 30,
+ [1][0][RTW89_MKK][46] = 127,
+ [1][0][RTW89_IC][46] = 84,
+ [1][0][RTW89_ACMA][46] = 84,
+ [1][0][RTW89_FCC][48] = 44,
+ [1][0][RTW89_ETSI][48] = 127,
+ [1][0][RTW89_MKK][48] = 127,
+ [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_ACMA][48] = 127,
+ [1][0][RTW89_FCC][50] = 44,
+ [1][0][RTW89_ETSI][50] = 127,
+ [1][0][RTW89_MKK][50] = 127,
+ [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_ACMA][50] = 127,
+ [1][0][RTW89_FCC][52] = 44,
+ [1][0][RTW89_ETSI][52] = 127,
+ [1][0][RTW89_MKK][52] = 127,
+ [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_ACMA][52] = 127,
+ [1][1][RTW89_FCC][0] = 42,
+ [1][1][RTW89_ETSI][0] = 32,
+ [1][1][RTW89_MKK][0] = 22,
+ [1][1][RTW89_IC][0] = 10,
+ [1][1][RTW89_ACMA][0] = 22,
+ [1][1][RTW89_FCC][2] = 44,
+ [1][1][RTW89_ETSI][2] = 32,
+ [1][1][RTW89_MKK][2] = 22,
+ [1][1][RTW89_IC][2] = 14,
+ [1][1][RTW89_ACMA][2] = 22,
+ [1][1][RTW89_FCC][4] = 42,
+ [1][1][RTW89_ETSI][4] = 32,
+ [1][1][RTW89_MKK][4] = 20,
+ [1][1][RTW89_IC][4] = 10,
+ [1][1][RTW89_ACMA][4] = 22,
+ [1][1][RTW89_FCC][6] = 42,
+ [1][1][RTW89_ETSI][6] = 32,
+ [1][1][RTW89_MKK][6] = 20,
+ [1][1][RTW89_IC][6] = 10,
+ [1][1][RTW89_ACMA][6] = 22,
+ [1][1][RTW89_FCC][8] = 44,
+ [1][1][RTW89_ETSI][8] = 32,
+ [1][1][RTW89_MKK][8] = 20,
+ [1][1][RTW89_IC][8] = 44,
+ [1][1][RTW89_ACMA][8] = 22,
+ [1][1][RTW89_FCC][10] = 44,
+ [1][1][RTW89_ETSI][10] = 32,
+ [1][1][RTW89_MKK][10] = 20,
+ [1][1][RTW89_IC][10] = 44,
+ [1][1][RTW89_ACMA][10] = 22,
+ [1][1][RTW89_FCC][12] = 46,
+ [1][1][RTW89_ETSI][12] = 32,
+ [1][1][RTW89_MKK][12] = 22,
+ [1][1][RTW89_IC][12] = 46,
+ [1][1][RTW89_ACMA][12] = 22,
+ [1][1][RTW89_FCC][14] = 42,
+ [1][1][RTW89_ETSI][14] = 32,
+ [1][1][RTW89_MKK][14] = 22,
+ [1][1][RTW89_IC][14] = 40,
+ [1][1][RTW89_ACMA][14] = 22,
+ [1][1][RTW89_FCC][15] = 42,
+ [1][1][RTW89_ETSI][15] = 30,
+ [1][1][RTW89_MKK][15] = 42,
+ [1][1][RTW89_IC][15] = 42,
+ [1][1][RTW89_ACMA][15] = 22,
+ [1][1][RTW89_FCC][17] = 42,
+ [1][1][RTW89_ETSI][17] = 30,
+ [1][1][RTW89_MKK][17] = 44,
+ [1][1][RTW89_IC][17] = 42,
+ [1][1][RTW89_ACMA][17] = 22,
+ [1][1][RTW89_FCC][19] = 42,
+ [1][1][RTW89_ETSI][19] = 30,
+ [1][1][RTW89_MKK][19] = 44,
+ [1][1][RTW89_IC][19] = 42,
+ [1][1][RTW89_ACMA][19] = 22,
+ [1][1][RTW89_FCC][21] = 42,
+ [1][1][RTW89_ETSI][21] = 30,
+ [1][1][RTW89_MKK][21] = 44,
+ [1][1][RTW89_IC][21] = 42,
+ [1][1][RTW89_ACMA][21] = 22,
+ [1][1][RTW89_FCC][23] = 42,
+ [1][1][RTW89_ETSI][23] = 30,
+ [1][1][RTW89_MKK][23] = 44,
+ [1][1][RTW89_IC][23] = 42,
+ [1][1][RTW89_ACMA][23] = 22,
+ [1][1][RTW89_FCC][25] = 42,
+ [1][1][RTW89_ETSI][25] = 30,
+ [1][1][RTW89_MKK][25] = 44,
+ [1][1][RTW89_IC][25] = 127,
+ [1][1][RTW89_ACMA][25] = 127,
+ [1][1][RTW89_FCC][27] = 42,
+ [1][1][RTW89_ETSI][27] = 32,
+ [1][1][RTW89_MKK][27] = 44,
+ [1][1][RTW89_IC][27] = 127,
+ [1][1][RTW89_ACMA][27] = 127,
+ [1][1][RTW89_FCC][29] = 42,
+ [1][1][RTW89_ETSI][29] = 32,
+ [1][1][RTW89_MKK][29] = 44,
+ [1][1][RTW89_IC][29] = 127,
+ [1][1][RTW89_ACMA][29] = 127,
+ [1][1][RTW89_FCC][31] = 42,
+ [1][1][RTW89_ETSI][31] = 32,
+ [1][1][RTW89_MKK][31] = 44,
+ [1][1][RTW89_IC][31] = 38,
+ [1][1][RTW89_ACMA][31] = 22,
+ [1][1][RTW89_FCC][33] = 40,
+ [1][1][RTW89_ETSI][33] = 32,
+ [1][1][RTW89_MKK][33] = 44,
+ [1][1][RTW89_IC][33] = 38,
+ [1][1][RTW89_ACMA][33] = 22,
+ [1][1][RTW89_FCC][35] = 40,
+ [1][1][RTW89_ETSI][35] = 32,
+ [1][1][RTW89_MKK][35] = 44,
+ [1][1][RTW89_IC][35] = 38,
+ [1][1][RTW89_ACMA][35] = 22,
+ [1][1][RTW89_FCC][37] = 48,
+ [1][1][RTW89_ETSI][37] = 127,
+ [1][1][RTW89_MKK][37] = 42,
+ [1][1][RTW89_IC][37] = 48,
+ [1][1][RTW89_ACMA][37] = 48,
+ [1][1][RTW89_FCC][38] = 84,
+ [1][1][RTW89_ETSI][38] = 18,
+ [1][1][RTW89_MKK][38] = 127,
+ [1][1][RTW89_IC][38] = 84,
+ [1][1][RTW89_ACMA][38] = 82,
+ [1][1][RTW89_FCC][40] = 84,
+ [1][1][RTW89_ETSI][40] = 18,
+ [1][1][RTW89_MKK][40] = 127,
+ [1][1][RTW89_IC][40] = 84,
+ [1][1][RTW89_ACMA][40] = 82,
+ [1][1][RTW89_FCC][42] = 84,
+ [1][1][RTW89_ETSI][42] = 18,
+ [1][1][RTW89_MKK][42] = 127,
+ [1][1][RTW89_IC][42] = 84,
+ [1][1][RTW89_ACMA][42] = 84,
+ [1][1][RTW89_FCC][44] = 84,
+ [1][1][RTW89_ETSI][44] = 18,
+ [1][1][RTW89_MKK][44] = 127,
+ [1][1][RTW89_IC][44] = 84,
+ [1][1][RTW89_ACMA][44] = 84,
+ [1][1][RTW89_FCC][46] = 84,
+ [1][1][RTW89_ETSI][46] = 18,
+ [1][1][RTW89_MKK][46] = 127,
+ [1][1][RTW89_IC][46] = 84,
+ [1][1][RTW89_ACMA][46] = 84,
+ [1][1][RTW89_FCC][48] = 32,
+ [1][1][RTW89_ETSI][48] = 127,
+ [1][1][RTW89_MKK][48] = 127,
+ [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_ACMA][48] = 127,
+ [1][1][RTW89_FCC][50] = 32,
+ [1][1][RTW89_ETSI][50] = 127,
+ [1][1][RTW89_MKK][50] = 127,
+ [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_ACMA][50] = 127,
+ [1][1][RTW89_FCC][52] = 32,
+ [1][1][RTW89_ETSI][52] = 127,
+ [1][1][RTW89_MKK][52] = 127,
+ [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_ACMA][52] = 127,
+ [2][0][RTW89_FCC][0] = 70,
+ [2][0][RTW89_ETSI][0] = 54,
+ [2][0][RTW89_MKK][0] = 48,
+ [2][0][RTW89_IC][0] = 46,
+ [2][0][RTW89_ACMA][0] = 48,
+ [2][0][RTW89_FCC][2] = 70,
+ [2][0][RTW89_ETSI][2] = 54,
+ [2][0][RTW89_MKK][2] = 48,
+ [2][0][RTW89_IC][2] = 46,
+ [2][0][RTW89_ACMA][2] = 48,
+ [2][0][RTW89_FCC][4] = 70,
+ [2][0][RTW89_ETSI][4] = 54,
+ [2][0][RTW89_MKK][4] = 48,
+ [2][0][RTW89_IC][4] = 46,
+ [2][0][RTW89_ACMA][4] = 48,
+ [2][0][RTW89_FCC][6] = 70,
+ [2][0][RTW89_ETSI][6] = 54,
+ [2][0][RTW89_MKK][6] = 48,
+ [2][0][RTW89_IC][6] = 46,
+ [2][0][RTW89_ACMA][6] = 48,
+ [2][0][RTW89_FCC][8] = 70,
+ [2][0][RTW89_ETSI][8] = 54,
+ [2][0][RTW89_MKK][8] = 48,
+ [2][0][RTW89_IC][8] = 66,
+ [2][0][RTW89_ACMA][8] = 48,
+ [2][0][RTW89_FCC][10] = 70,
+ [2][0][RTW89_ETSI][10] = 54,
+ [2][0][RTW89_MKK][10] = 48,
+ [2][0][RTW89_IC][10] = 66,
+ [2][0][RTW89_ACMA][10] = 48,
+ [2][0][RTW89_FCC][12] = 70,
+ [2][0][RTW89_ETSI][12] = 54,
+ [2][0][RTW89_MKK][12] = 46,
+ [2][0][RTW89_IC][12] = 66,
+ [2][0][RTW89_ACMA][12] = 48,
+ [2][0][RTW89_FCC][14] = 70,
+ [2][0][RTW89_ETSI][14] = 54,
+ [2][0][RTW89_MKK][14] = 46,
+ [2][0][RTW89_IC][14] = 66,
+ [2][0][RTW89_ACMA][14] = 48,
+ [2][0][RTW89_FCC][15] = 70,
+ [2][0][RTW89_ETSI][15] = 54,
+ [2][0][RTW89_MKK][15] = 68,
+ [2][0][RTW89_IC][15] = 70,
+ [2][0][RTW89_ACMA][15] = 48,
+ [2][0][RTW89_FCC][17] = 70,
+ [2][0][RTW89_ETSI][17] = 54,
+ [2][0][RTW89_MKK][17] = 70,
+ [2][0][RTW89_IC][17] = 70,
+ [2][0][RTW89_ACMA][17] = 48,
+ [2][0][RTW89_FCC][19] = 70,
+ [2][0][RTW89_ETSI][19] = 54,
+ [2][0][RTW89_MKK][19] = 70,
+ [2][0][RTW89_IC][19] = 70,
+ [2][0][RTW89_ACMA][19] = 48,
+ [2][0][RTW89_FCC][21] = 70,
+ [2][0][RTW89_ETSI][21] = 54,
+ [2][0][RTW89_MKK][21] = 70,
+ [2][0][RTW89_IC][21] = 70,
+ [2][0][RTW89_ACMA][21] = 48,
+ [2][0][RTW89_FCC][23] = 70,
+ [2][0][RTW89_ETSI][23] = 54,
+ [2][0][RTW89_MKK][23] = 70,
+ [2][0][RTW89_IC][23] = 70,
+ [2][0][RTW89_ACMA][23] = 48,
+ [2][0][RTW89_FCC][25] = 70,
+ [2][0][RTW89_ETSI][25] = 54,
+ [2][0][RTW89_MKK][25] = 70,
+ [2][0][RTW89_IC][25] = 127,
+ [2][0][RTW89_ACMA][25] = 127,
+ [2][0][RTW89_FCC][27] = 70,
+ [2][0][RTW89_ETSI][27] = 54,
+ [2][0][RTW89_MKK][27] = 70,
+ [2][0][RTW89_IC][27] = 127,
+ [2][0][RTW89_ACMA][27] = 127,
+ [2][0][RTW89_FCC][29] = 70,
+ [2][0][RTW89_ETSI][29] = 54,
+ [2][0][RTW89_MKK][29] = 70,
+ [2][0][RTW89_IC][29] = 127,
+ [2][0][RTW89_ACMA][29] = 127,
+ [2][0][RTW89_FCC][31] = 70,
+ [2][0][RTW89_ETSI][31] = 54,
+ [2][0][RTW89_MKK][31] = 70,
+ [2][0][RTW89_IC][31] = 72,
+ [2][0][RTW89_ACMA][31] = 48,
+ [2][0][RTW89_FCC][33] = 72,
+ [2][0][RTW89_ETSI][33] = 54,
+ [2][0][RTW89_MKK][33] = 70,
+ [2][0][RTW89_IC][33] = 72,
+ [2][0][RTW89_ACMA][33] = 48,
+ [2][0][RTW89_FCC][35] = 72,
+ [2][0][RTW89_ETSI][35] = 54,
+ [2][0][RTW89_MKK][35] = 70,
+ [2][0][RTW89_IC][35] = 72,
+ [2][0][RTW89_ACMA][35] = 48,
+ [2][0][RTW89_FCC][37] = 70,
+ [2][0][RTW89_ETSI][37] = 127,
+ [2][0][RTW89_MKK][37] = 66,
+ [2][0][RTW89_IC][37] = 70,
+ [2][0][RTW89_ACMA][37] = 76,
+ [2][0][RTW89_FCC][38] = 84,
+ [2][0][RTW89_ETSI][38] = 30,
+ [2][0][RTW89_MKK][38] = 127,
+ [2][0][RTW89_IC][38] = 84,
+ [2][0][RTW89_ACMA][38] = 84,
+ [2][0][RTW89_FCC][40] = 84,
+ [2][0][RTW89_ETSI][40] = 30,
+ [2][0][RTW89_MKK][40] = 127,
+ [2][0][RTW89_IC][40] = 84,
+ [2][0][RTW89_ACMA][40] = 84,
+ [2][0][RTW89_FCC][42] = 84,
+ [2][0][RTW89_ETSI][42] = 30,
+ [2][0][RTW89_MKK][42] = 127,
+ [2][0][RTW89_IC][42] = 84,
+ [2][0][RTW89_ACMA][42] = 84,
+ [2][0][RTW89_FCC][44] = 84,
+ [2][0][RTW89_ETSI][44] = 30,
+ [2][0][RTW89_MKK][44] = 127,
+ [2][0][RTW89_IC][44] = 84,
+ [2][0][RTW89_ACMA][44] = 84,
+ [2][0][RTW89_FCC][46] = 84,
+ [2][0][RTW89_ETSI][46] = 30,
+ [2][0][RTW89_MKK][46] = 127,
+ [2][0][RTW89_IC][46] = 84,
+ [2][0][RTW89_ACMA][46] = 84,
+ [2][0][RTW89_FCC][48] = 56,
+ [2][0][RTW89_ETSI][48] = 127,
+ [2][0][RTW89_MKK][48] = 127,
+ [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_ACMA][48] = 127,
+ [2][0][RTW89_FCC][50] = 56,
+ [2][0][RTW89_ETSI][50] = 127,
+ [2][0][RTW89_MKK][50] = 127,
+ [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_ACMA][50] = 127,
+ [2][0][RTW89_FCC][52] = 56,
+ [2][0][RTW89_ETSI][52] = 127,
+ [2][0][RTW89_MKK][52] = 127,
+ [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_ACMA][52] = 127,
+ [2][1][RTW89_FCC][0] = 50,
+ [2][1][RTW89_ETSI][0] = 42,
+ [2][1][RTW89_MKK][0] = 36,
+ [2][1][RTW89_IC][0] = 20,
+ [2][1][RTW89_ACMA][0] = 36,
+ [2][1][RTW89_FCC][2] = 50,
+ [2][1][RTW89_ETSI][2] = 42,
+ [2][1][RTW89_MKK][2] = 36,
+ [2][1][RTW89_IC][2] = 18,
+ [2][1][RTW89_ACMA][2] = 36,
+ [2][1][RTW89_FCC][4] = 50,
+ [2][1][RTW89_ETSI][4] = 42,
+ [2][1][RTW89_MKK][4] = 36,
+ [2][1][RTW89_IC][4] = 22,
+ [2][1][RTW89_ACMA][4] = 36,
+ [2][1][RTW89_FCC][6] = 50,
+ [2][1][RTW89_ETSI][6] = 42,
+ [2][1][RTW89_MKK][6] = 36,
+ [2][1][RTW89_IC][6] = 22,
+ [2][1][RTW89_ACMA][6] = 36,
+ [2][1][RTW89_FCC][8] = 50,
+ [2][1][RTW89_ETSI][8] = 42,
+ [2][1][RTW89_MKK][8] = 34,
+ [2][1][RTW89_IC][8] = 50,
+ [2][1][RTW89_ACMA][8] = 36,
+ [2][1][RTW89_FCC][10] = 50,
+ [2][1][RTW89_ETSI][10] = 42,
+ [2][1][RTW89_MKK][10] = 34,
+ [2][1][RTW89_IC][10] = 50,
+ [2][1][RTW89_ACMA][10] = 36,
+ [2][1][RTW89_FCC][12] = 52,
+ [2][1][RTW89_ETSI][12] = 42,
+ [2][1][RTW89_MKK][12] = 36,
+ [2][1][RTW89_IC][12] = 52,
+ [2][1][RTW89_ACMA][12] = 36,
+ [2][1][RTW89_FCC][14] = 52,
+ [2][1][RTW89_ETSI][14] = 42,
+ [2][1][RTW89_MKK][14] = 36,
+ [2][1][RTW89_IC][14] = 52,
+ [2][1][RTW89_ACMA][14] = 36,
+ [2][1][RTW89_FCC][15] = 50,
+ [2][1][RTW89_ETSI][15] = 42,
+ [2][1][RTW89_MKK][15] = 54,
+ [2][1][RTW89_IC][15] = 50,
+ [2][1][RTW89_ACMA][15] = 36,
+ [2][1][RTW89_FCC][17] = 50,
+ [2][1][RTW89_ETSI][17] = 42,
+ [2][1][RTW89_MKK][17] = 56,
+ [2][1][RTW89_IC][17] = 50,
+ [2][1][RTW89_ACMA][17] = 36,
+ [2][1][RTW89_FCC][19] = 50,
+ [2][1][RTW89_ETSI][19] = 42,
+ [2][1][RTW89_MKK][19] = 56,
+ [2][1][RTW89_IC][19] = 50,
+ [2][1][RTW89_ACMA][19] = 36,
+ [2][1][RTW89_FCC][21] = 50,
+ [2][1][RTW89_ETSI][21] = 42,
+ [2][1][RTW89_MKK][21] = 56,
+ [2][1][RTW89_IC][21] = 50,
+ [2][1][RTW89_ACMA][21] = 36,
+ [2][1][RTW89_FCC][23] = 50,
+ [2][1][RTW89_ETSI][23] = 42,
+ [2][1][RTW89_MKK][23] = 56,
+ [2][1][RTW89_IC][23] = 50,
+ [2][1][RTW89_ACMA][23] = 36,
+ [2][1][RTW89_FCC][25] = 50,
+ [2][1][RTW89_ETSI][25] = 42,
+ [2][1][RTW89_MKK][25] = 56,
+ [2][1][RTW89_IC][25] = 127,
+ [2][1][RTW89_ACMA][25] = 127,
+ [2][1][RTW89_FCC][27] = 50,
+ [2][1][RTW89_ETSI][27] = 42,
+ [2][1][RTW89_MKK][27] = 56,
+ [2][1][RTW89_IC][27] = 127,
+ [2][1][RTW89_ACMA][27] = 127,
+ [2][1][RTW89_FCC][29] = 50,
+ [2][1][RTW89_ETSI][29] = 42,
+ [2][1][RTW89_MKK][29] = 56,
+ [2][1][RTW89_IC][29] = 127,
+ [2][1][RTW89_ACMA][29] = 127,
+ [2][1][RTW89_FCC][31] = 50,
+ [2][1][RTW89_ETSI][31] = 42,
+ [2][1][RTW89_MKK][31] = 56,
+ [2][1][RTW89_IC][31] = 50,
+ [2][1][RTW89_ACMA][31] = 36,
+ [2][1][RTW89_FCC][33] = 50,
+ [2][1][RTW89_ETSI][33] = 42,
+ [2][1][RTW89_MKK][33] = 56,
+ [2][1][RTW89_IC][33] = 50,
+ [2][1][RTW89_ACMA][33] = 36,
+ [2][1][RTW89_FCC][35] = 50,
+ [2][1][RTW89_ETSI][35] = 42,
+ [2][1][RTW89_MKK][35] = 56,
+ [2][1][RTW89_IC][35] = 50,
+ [2][1][RTW89_ACMA][35] = 36,
+ [2][1][RTW89_FCC][37] = 50,
+ [2][1][RTW89_ETSI][37] = 127,
+ [2][1][RTW89_MKK][37] = 54,
+ [2][1][RTW89_IC][37] = 50,
+ [2][1][RTW89_ACMA][37] = 60,
+ [2][1][RTW89_FCC][38] = 84,
+ [2][1][RTW89_ETSI][38] = 18,
+ [2][1][RTW89_MKK][38] = 127,
+ [2][1][RTW89_IC][38] = 84,
+ [2][1][RTW89_ACMA][38] = 84,
+ [2][1][RTW89_FCC][40] = 84,
+ [2][1][RTW89_ETSI][40] = 18,
+ [2][1][RTW89_MKK][40] = 127,
+ [2][1][RTW89_IC][40] = 84,
+ [2][1][RTW89_ACMA][40] = 84,
+ [2][1][RTW89_FCC][42] = 84,
+ [2][1][RTW89_ETSI][42] = 18,
+ [2][1][RTW89_MKK][42] = 127,
+ [2][1][RTW89_IC][42] = 84,
+ [2][1][RTW89_ACMA][42] = 84,
+ [2][1][RTW89_FCC][44] = 84,
+ [2][1][RTW89_ETSI][44] = 18,
+ [2][1][RTW89_MKK][44] = 127,
+ [2][1][RTW89_IC][44] = 84,
+ [2][1][RTW89_ACMA][44] = 84,
+ [2][1][RTW89_FCC][46] = 84,
+ [2][1][RTW89_ETSI][46] = 18,
+ [2][1][RTW89_MKK][46] = 127,
+ [2][1][RTW89_IC][46] = 84,
+ [2][1][RTW89_ACMA][46] = 84,
+ [2][1][RTW89_FCC][48] = 44,
+ [2][1][RTW89_ETSI][48] = 127,
+ [2][1][RTW89_MKK][48] = 127,
+ [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_ACMA][48] = 127,
+ [2][1][RTW89_FCC][50] = 44,
+ [2][1][RTW89_ETSI][50] = 127,
+ [2][1][RTW89_MKK][50] = 127,
+ [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_ACMA][50] = 127,
+ [2][1][RTW89_FCC][52] = 44,
+ [2][1][RTW89_ETSI][52] = 127,
+ [2][1][RTW89_MKK][52] = 127,
+ [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_ACMA][52] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = 76,
+ [0][0][RTW89_WW][2] = 76,
+ [0][0][RTW89_WW][4] = 76,
+ [0][0][RTW89_WW][6] = 76,
+ [0][0][RTW89_WW][8] = 76,
+ [0][0][RTW89_WW][10] = 76,
+ [0][0][RTW89_WW][12] = 76,
+ [0][0][RTW89_WW][14] = 76,
+ [0][0][RTW89_WW][15] = 76,
+ [0][0][RTW89_WW][17] = 76,
+ [0][0][RTW89_WW][19] = 76,
+ [0][0][RTW89_WW][21] = 76,
+ [0][0][RTW89_WW][23] = 76,
+ [0][0][RTW89_WW][25] = 76,
+ [0][0][RTW89_WW][27] = 76,
+ [0][0][RTW89_WW][29] = 76,
+ [0][0][RTW89_WW][30] = 76,
+ [0][0][RTW89_WW][32] = 76,
+ [0][0][RTW89_WW][34] = 76,
+ [0][0][RTW89_WW][36] = 76,
+ [0][0][RTW89_WW][38] = 76,
+ [0][0][RTW89_WW][40] = 76,
+ [0][0][RTW89_WW][42] = 76,
+ [0][0][RTW89_WW][44] = 76,
+ [0][0][RTW89_WW][45] = 76,
+ [0][0][RTW89_WW][47] = 76,
+ [0][0][RTW89_WW][49] = 76,
+ [0][0][RTW89_WW][51] = 76,
+ [0][0][RTW89_WW][53] = 76,
+ [0][0][RTW89_WW][55] = 76,
+ [0][0][RTW89_WW][57] = 76,
+ [0][0][RTW89_WW][59] = 76,
+ [0][0][RTW89_WW][60] = 76,
+ [0][0][RTW89_WW][62] = 76,
+ [0][0][RTW89_WW][64] = 76,
+ [0][0][RTW89_WW][66] = 76,
+ [0][0][RTW89_WW][68] = 76,
+ [0][0][RTW89_WW][70] = 76,
+ [0][0][RTW89_WW][72] = 76,
+ [0][0][RTW89_WW][74] = 76,
+ [0][0][RTW89_WW][75] = 76,
+ [0][0][RTW89_WW][77] = 76,
+ [0][0][RTW89_WW][79] = 76,
+ [0][0][RTW89_WW][81] = 76,
+ [0][0][RTW89_WW][83] = 76,
+ [0][0][RTW89_WW][85] = 76,
+ [0][0][RTW89_WW][87] = 76,
+ [0][0][RTW89_WW][89] = 76,
+ [0][0][RTW89_WW][90] = 76,
+ [0][0][RTW89_WW][92] = 76,
+ [0][0][RTW89_WW][94] = 76,
+ [0][0][RTW89_WW][96] = 76,
+ [0][0][RTW89_WW][98] = 76,
+ [0][0][RTW89_WW][100] = 76,
+ [0][0][RTW89_WW][102] = 76,
+ [0][0][RTW89_WW][104] = 76,
+ [0][0][RTW89_WW][105] = 76,
+ [0][0][RTW89_WW][107] = 76,
+ [0][0][RTW89_WW][109] = 76,
+ [0][0][RTW89_WW][111] = 0,
+ [0][0][RTW89_WW][113] = 0,
+ [0][0][RTW89_WW][115] = 0,
+ [0][0][RTW89_WW][117] = 0,
+ [0][0][RTW89_WW][119] = 0,
+ [0][1][RTW89_WW][0] = 76,
+ [0][1][RTW89_WW][2] = 76,
+ [0][1][RTW89_WW][4] = 76,
+ [0][1][RTW89_WW][6] = 76,
+ [0][1][RTW89_WW][8] = 76,
+ [0][1][RTW89_WW][10] = 76,
+ [0][1][RTW89_WW][12] = 76,
+ [0][1][RTW89_WW][14] = 76,
+ [0][1][RTW89_WW][15] = 76,
+ [0][1][RTW89_WW][17] = 76,
+ [0][1][RTW89_WW][19] = 76,
+ [0][1][RTW89_WW][21] = 76,
+ [0][1][RTW89_WW][23] = 76,
+ [0][1][RTW89_WW][25] = 76,
+ [0][1][RTW89_WW][27] = 76,
+ [0][1][RTW89_WW][29] = 76,
+ [0][1][RTW89_WW][30] = 76,
+ [0][1][RTW89_WW][32] = 76,
+ [0][1][RTW89_WW][34] = 76,
+ [0][1][RTW89_WW][36] = 76,
+ [0][1][RTW89_WW][38] = 76,
+ [0][1][RTW89_WW][40] = 76,
+ [0][1][RTW89_WW][42] = 76,
+ [0][1][RTW89_WW][44] = 76,
+ [0][1][RTW89_WW][45] = 76,
+ [0][1][RTW89_WW][47] = 76,
+ [0][1][RTW89_WW][49] = 76,
+ [0][1][RTW89_WW][51] = 76,
+ [0][1][RTW89_WW][53] = 76,
+ [0][1][RTW89_WW][55] = 76,
+ [0][1][RTW89_WW][57] = 76,
+ [0][1][RTW89_WW][59] = 76,
+ [0][1][RTW89_WW][60] = 76,
+ [0][1][RTW89_WW][62] = 76,
+ [0][1][RTW89_WW][64] = 76,
+ [0][1][RTW89_WW][66] = 76,
+ [0][1][RTW89_WW][68] = 76,
+ [0][1][RTW89_WW][70] = 76,
+ [0][1][RTW89_WW][72] = 76,
+ [0][1][RTW89_WW][74] = 76,
+ [0][1][RTW89_WW][75] = 76,
+ [0][1][RTW89_WW][77] = 76,
+ [0][1][RTW89_WW][79] = 76,
+ [0][1][RTW89_WW][81] = 76,
+ [0][1][RTW89_WW][83] = 76,
+ [0][1][RTW89_WW][85] = 76,
+ [0][1][RTW89_WW][87] = 76,
+ [0][1][RTW89_WW][89] = 76,
+ [0][1][RTW89_WW][90] = 76,
+ [0][1][RTW89_WW][92] = 76,
+ [0][1][RTW89_WW][94] = 76,
+ [0][1][RTW89_WW][96] = 76,
+ [0][1][RTW89_WW][98] = 76,
+ [0][1][RTW89_WW][100] = 76,
+ [0][1][RTW89_WW][102] = 76,
+ [0][1][RTW89_WW][104] = 76,
+ [0][1][RTW89_WW][105] = 76,
+ [0][1][RTW89_WW][107] = 76,
+ [0][1][RTW89_WW][109] = 76,
+ [0][1][RTW89_WW][111] = 0,
+ [0][1][RTW89_WW][113] = 0,
+ [0][1][RTW89_WW][115] = 0,
+ [0][1][RTW89_WW][117] = 0,
+ [0][1][RTW89_WW][119] = 0,
+ [1][0][RTW89_WW][0] = 76,
+ [1][0][RTW89_WW][2] = 76,
+ [1][0][RTW89_WW][4] = 76,
+ [1][0][RTW89_WW][6] = 76,
+ [1][0][RTW89_WW][8] = 76,
+ [1][0][RTW89_WW][10] = 76,
+ [1][0][RTW89_WW][12] = 76,
+ [1][0][RTW89_WW][14] = 76,
+ [1][0][RTW89_WW][15] = 76,
+ [1][0][RTW89_WW][17] = 76,
+ [1][0][RTW89_WW][19] = 76,
+ [1][0][RTW89_WW][21] = 76,
+ [1][0][RTW89_WW][23] = 76,
+ [1][0][RTW89_WW][25] = 76,
+ [1][0][RTW89_WW][27] = 76,
+ [1][0][RTW89_WW][29] = 76,
+ [1][0][RTW89_WW][30] = 76,
+ [1][0][RTW89_WW][32] = 76,
+ [1][0][RTW89_WW][34] = 76,
+ [1][0][RTW89_WW][36] = 76,
+ [1][0][RTW89_WW][38] = 76,
+ [1][0][RTW89_WW][40] = 76,
+ [1][0][RTW89_WW][42] = 76,
+ [1][0][RTW89_WW][44] = 76,
+ [1][0][RTW89_WW][45] = 76,
+ [1][0][RTW89_WW][47] = 76,
+ [1][0][RTW89_WW][49] = 76,
+ [1][0][RTW89_WW][51] = 76,
+ [1][0][RTW89_WW][53] = 76,
+ [1][0][RTW89_WW][55] = 76,
+ [1][0][RTW89_WW][57] = 76,
+ [1][0][RTW89_WW][59] = 76,
+ [1][0][RTW89_WW][60] = 76,
+ [1][0][RTW89_WW][62] = 76,
+ [1][0][RTW89_WW][64] = 76,
+ [1][0][RTW89_WW][66] = 76,
+ [1][0][RTW89_WW][68] = 76,
+ [1][0][RTW89_WW][70] = 76,
+ [1][0][RTW89_WW][72] = 76,
+ [1][0][RTW89_WW][74] = 76,
+ [1][0][RTW89_WW][75] = 76,
+ [1][0][RTW89_WW][77] = 76,
+ [1][0][RTW89_WW][79] = 76,
+ [1][0][RTW89_WW][81] = 76,
+ [1][0][RTW89_WW][83] = 76,
+ [1][0][RTW89_WW][85] = 76,
+ [1][0][RTW89_WW][87] = 76,
+ [1][0][RTW89_WW][89] = 76,
+ [1][0][RTW89_WW][90] = 76,
+ [1][0][RTW89_WW][92] = 76,
+ [1][0][RTW89_WW][94] = 76,
+ [1][0][RTW89_WW][96] = 76,
+ [1][0][RTW89_WW][98] = 76,
+ [1][0][RTW89_WW][100] = 76,
+ [1][0][RTW89_WW][102] = 76,
+ [1][0][RTW89_WW][104] = 76,
+ [1][0][RTW89_WW][105] = 76,
+ [1][0][RTW89_WW][107] = 76,
+ [1][0][RTW89_WW][109] = 76,
+ [1][0][RTW89_WW][111] = 0,
+ [1][0][RTW89_WW][113] = 0,
+ [1][0][RTW89_WW][115] = 0,
+ [1][0][RTW89_WW][117] = 0,
+ [1][0][RTW89_WW][119] = 0,
+ [1][1][RTW89_WW][0] = 76,
+ [1][1][RTW89_WW][2] = 76,
+ [1][1][RTW89_WW][4] = 76,
+ [1][1][RTW89_WW][6] = 76,
+ [1][1][RTW89_WW][8] = 76,
+ [1][1][RTW89_WW][10] = 76,
+ [1][1][RTW89_WW][12] = 76,
+ [1][1][RTW89_WW][14] = 76,
+ [1][1][RTW89_WW][15] = 76,
+ [1][1][RTW89_WW][17] = 76,
+ [1][1][RTW89_WW][19] = 76,
+ [1][1][RTW89_WW][21] = 76,
+ [1][1][RTW89_WW][23] = 76,
+ [1][1][RTW89_WW][25] = 76,
+ [1][1][RTW89_WW][27] = 76,
+ [1][1][RTW89_WW][29] = 76,
+ [1][1][RTW89_WW][30] = 76,
+ [1][1][RTW89_WW][32] = 76,
+ [1][1][RTW89_WW][34] = 76,
+ [1][1][RTW89_WW][36] = 76,
+ [1][1][RTW89_WW][38] = 76,
+ [1][1][RTW89_WW][40] = 76,
+ [1][1][RTW89_WW][42] = 76,
+ [1][1][RTW89_WW][44] = 76,
+ [1][1][RTW89_WW][45] = 76,
+ [1][1][RTW89_WW][47] = 76,
+ [1][1][RTW89_WW][49] = 76,
+ [1][1][RTW89_WW][51] = 76,
+ [1][1][RTW89_WW][53] = 76,
+ [1][1][RTW89_WW][55] = 76,
+ [1][1][RTW89_WW][57] = 76,
+ [1][1][RTW89_WW][59] = 76,
+ [1][1][RTW89_WW][60] = 76,
+ [1][1][RTW89_WW][62] = 76,
+ [1][1][RTW89_WW][64] = 76,
+ [1][1][RTW89_WW][66] = 76,
+ [1][1][RTW89_WW][68] = 76,
+ [1][1][RTW89_WW][70] = 76,
+ [1][1][RTW89_WW][72] = 76,
+ [1][1][RTW89_WW][74] = 76,
+ [1][1][RTW89_WW][75] = 76,
+ [1][1][RTW89_WW][77] = 76,
+ [1][1][RTW89_WW][79] = 76,
+ [1][1][RTW89_WW][81] = 76,
+ [1][1][RTW89_WW][83] = 76,
+ [1][1][RTW89_WW][85] = 76,
+ [1][1][RTW89_WW][87] = 76,
+ [1][1][RTW89_WW][89] = 76,
+ [1][1][RTW89_WW][90] = 76,
+ [1][1][RTW89_WW][92] = 76,
+ [1][1][RTW89_WW][94] = 76,
+ [1][1][RTW89_WW][96] = 76,
+ [1][1][RTW89_WW][98] = 76,
+ [1][1][RTW89_WW][100] = 76,
+ [1][1][RTW89_WW][102] = 76,
+ [1][1][RTW89_WW][104] = 76,
+ [1][1][RTW89_WW][105] = 76,
+ [1][1][RTW89_WW][107] = 76,
+ [1][1][RTW89_WW][109] = 76,
+ [1][1][RTW89_WW][111] = 0,
+ [1][1][RTW89_WW][113] = 0,
+ [1][1][RTW89_WW][115] = 0,
+ [1][1][RTW89_WW][117] = 0,
+ [1][1][RTW89_WW][119] = 0,
+ [2][0][RTW89_WW][0] = 76,
+ [2][0][RTW89_WW][2] = 76,
+ [2][0][RTW89_WW][4] = 76,
+ [2][0][RTW89_WW][6] = 76,
+ [2][0][RTW89_WW][8] = 76,
+ [2][0][RTW89_WW][10] = 76,
+ [2][0][RTW89_WW][12] = 76,
+ [2][0][RTW89_WW][14] = 76,
+ [2][0][RTW89_WW][15] = 76,
+ [2][0][RTW89_WW][17] = 76,
+ [2][0][RTW89_WW][19] = 76,
+ [2][0][RTW89_WW][21] = 76,
+ [2][0][RTW89_WW][23] = 76,
+ [2][0][RTW89_WW][25] = 76,
+ [2][0][RTW89_WW][27] = 76,
+ [2][0][RTW89_WW][29] = 76,
+ [2][0][RTW89_WW][30] = 76,
+ [2][0][RTW89_WW][32] = 76,
+ [2][0][RTW89_WW][34] = 76,
+ [2][0][RTW89_WW][36] = 76,
+ [2][0][RTW89_WW][38] = 76,
+ [2][0][RTW89_WW][40] = 76,
+ [2][0][RTW89_WW][42] = 76,
+ [2][0][RTW89_WW][44] = 76,
+ [2][0][RTW89_WW][45] = 76,
+ [2][0][RTW89_WW][47] = 76,
+ [2][0][RTW89_WW][49] = 76,
+ [2][0][RTW89_WW][51] = 76,
+ [2][0][RTW89_WW][53] = 76,
+ [2][0][RTW89_WW][55] = 76,
+ [2][0][RTW89_WW][57] = 76,
+ [2][0][RTW89_WW][59] = 76,
+ [2][0][RTW89_WW][60] = 76,
+ [2][0][RTW89_WW][62] = 76,
+ [2][0][RTW89_WW][64] = 76,
+ [2][0][RTW89_WW][66] = 76,
+ [2][0][RTW89_WW][68] = 76,
+ [2][0][RTW89_WW][70] = 76,
+ [2][0][RTW89_WW][72] = 76,
+ [2][0][RTW89_WW][74] = 76,
+ [2][0][RTW89_WW][75] = 76,
+ [2][0][RTW89_WW][77] = 76,
+ [2][0][RTW89_WW][79] = 76,
+ [2][0][RTW89_WW][81] = 76,
+ [2][0][RTW89_WW][83] = 76,
+ [2][0][RTW89_WW][85] = 76,
+ [2][0][RTW89_WW][87] = 76,
+ [2][0][RTW89_WW][89] = 76,
+ [2][0][RTW89_WW][90] = 76,
+ [2][0][RTW89_WW][92] = 76,
+ [2][0][RTW89_WW][94] = 76,
+ [2][0][RTW89_WW][96] = 76,
+ [2][0][RTW89_WW][98] = 76,
+ [2][0][RTW89_WW][100] = 76,
+ [2][0][RTW89_WW][102] = 76,
+ [2][0][RTW89_WW][104] = 76,
+ [2][0][RTW89_WW][105] = 76,
+ [2][0][RTW89_WW][107] = 76,
+ [2][0][RTW89_WW][109] = 76,
+ [2][0][RTW89_WW][111] = 0,
+ [2][0][RTW89_WW][113] = 0,
+ [2][0][RTW89_WW][115] = 0,
+ [2][0][RTW89_WW][117] = 0,
+ [2][0][RTW89_WW][119] = 0,
+ [2][1][RTW89_WW][0] = 76,
+ [2][1][RTW89_WW][2] = 76,
+ [2][1][RTW89_WW][4] = 76,
+ [2][1][RTW89_WW][6] = 76,
+ [2][1][RTW89_WW][8] = 76,
+ [2][1][RTW89_WW][10] = 76,
+ [2][1][RTW89_WW][12] = 76,
+ [2][1][RTW89_WW][14] = 76,
+ [2][1][RTW89_WW][15] = 76,
+ [2][1][RTW89_WW][17] = 76,
+ [2][1][RTW89_WW][19] = 76,
+ [2][1][RTW89_WW][21] = 76,
+ [2][1][RTW89_WW][23] = 76,
+ [2][1][RTW89_WW][25] = 76,
+ [2][1][RTW89_WW][27] = 76,
+ [2][1][RTW89_WW][29] = 76,
+ [2][1][RTW89_WW][30] = 76,
+ [2][1][RTW89_WW][32] = 76,
+ [2][1][RTW89_WW][34] = 76,
+ [2][1][RTW89_WW][36] = 76,
+ [2][1][RTW89_WW][38] = 76,
+ [2][1][RTW89_WW][40] = 76,
+ [2][1][RTW89_WW][42] = 76,
+ [2][1][RTW89_WW][44] = 76,
+ [2][1][RTW89_WW][45] = 76,
+ [2][1][RTW89_WW][47] = 76,
+ [2][1][RTW89_WW][49] = 76,
+ [2][1][RTW89_WW][51] = 76,
+ [2][1][RTW89_WW][53] = 76,
+ [2][1][RTW89_WW][55] = 76,
+ [2][1][RTW89_WW][57] = 76,
+ [2][1][RTW89_WW][59] = 76,
+ [2][1][RTW89_WW][60] = 76,
+ [2][1][RTW89_WW][62] = 76,
+ [2][1][RTW89_WW][64] = 76,
+ [2][1][RTW89_WW][66] = 76,
+ [2][1][RTW89_WW][68] = 76,
+ [2][1][RTW89_WW][70] = 76,
+ [2][1][RTW89_WW][72] = 76,
+ [2][1][RTW89_WW][74] = 76,
+ [2][1][RTW89_WW][75] = 76,
+ [2][1][RTW89_WW][77] = 76,
+ [2][1][RTW89_WW][79] = 76,
+ [2][1][RTW89_WW][81] = 76,
+ [2][1][RTW89_WW][83] = 76,
+ [2][1][RTW89_WW][85] = 76,
+ [2][1][RTW89_WW][87] = 76,
+ [2][1][RTW89_WW][89] = 76,
+ [2][1][RTW89_WW][90] = 76,
+ [2][1][RTW89_WW][92] = 76,
+ [2][1][RTW89_WW][94] = 76,
+ [2][1][RTW89_WW][96] = 76,
+ [2][1][RTW89_WW][98] = 76,
+ [2][1][RTW89_WW][100] = 76,
+ [2][1][RTW89_WW][102] = 76,
+ [2][1][RTW89_WW][104] = 76,
+ [2][1][RTW89_WW][105] = 76,
+ [2][1][RTW89_WW][107] = 76,
+ [2][1][RTW89_WW][109] = 76,
+ [2][1][RTW89_WW][111] = 0,
+ [2][1][RTW89_WW][113] = 0,
+ [2][1][RTW89_WW][115] = 0,
+ [2][1][RTW89_WW][117] = 0,
+ [2][1][RTW89_WW][119] = 0,
+ [0][0][RTW89_FCC][0] = 76,
+ [0][0][RTW89_FCC][2] = 76,
+ [0][0][RTW89_FCC][4] = 76,
+ [0][0][RTW89_FCC][6] = 76,
+ [0][0][RTW89_FCC][8] = 76,
+ [0][0][RTW89_FCC][10] = 76,
+ [0][0][RTW89_FCC][12] = 76,
+ [0][0][RTW89_FCC][14] = 76,
+ [0][0][RTW89_FCC][15] = 76,
+ [0][0][RTW89_FCC][17] = 76,
+ [0][0][RTW89_FCC][19] = 76,
+ [0][0][RTW89_FCC][21] = 76,
+ [0][0][RTW89_FCC][23] = 76,
+ [0][0][RTW89_FCC][25] = 76,
+ [0][0][RTW89_FCC][27] = 76,
+ [0][0][RTW89_FCC][29] = 76,
+ [0][0][RTW89_FCC][30] = 76,
+ [0][0][RTW89_FCC][32] = 76,
+ [0][0][RTW89_FCC][34] = 76,
+ [0][0][RTW89_FCC][36] = 76,
+ [0][0][RTW89_FCC][38] = 76,
+ [0][0][RTW89_FCC][40] = 76,
+ [0][0][RTW89_FCC][42] = 76,
+ [0][0][RTW89_FCC][44] = 76,
+ [0][0][RTW89_FCC][45] = 76,
+ [0][0][RTW89_FCC][47] = 76,
+ [0][0][RTW89_FCC][49] = 76,
+ [0][0][RTW89_FCC][51] = 76,
+ [0][0][RTW89_FCC][53] = 76,
+ [0][0][RTW89_FCC][55] = 76,
+ [0][0][RTW89_FCC][57] = 76,
+ [0][0][RTW89_FCC][59] = 76,
+ [0][0][RTW89_FCC][60] = 76,
+ [0][0][RTW89_FCC][62] = 76,
+ [0][0][RTW89_FCC][64] = 76,
+ [0][0][RTW89_FCC][66] = 76,
+ [0][0][RTW89_FCC][68] = 76,
+ [0][0][RTW89_FCC][70] = 76,
+ [0][0][RTW89_FCC][72] = 76,
+ [0][0][RTW89_FCC][74] = 76,
+ [0][0][RTW89_FCC][75] = 76,
+ [0][0][RTW89_FCC][77] = 76,
+ [0][0][RTW89_FCC][79] = 76,
+ [0][0][RTW89_FCC][81] = 76,
+ [0][0][RTW89_FCC][83] = 76,
+ [0][0][RTW89_FCC][85] = 76,
+ [0][0][RTW89_FCC][87] = 76,
+ [0][0][RTW89_FCC][89] = 76,
+ [0][0][RTW89_FCC][90] = 76,
+ [0][0][RTW89_FCC][92] = 76,
+ [0][0][RTW89_FCC][94] = 76,
+ [0][0][RTW89_FCC][96] = 76,
+ [0][0][RTW89_FCC][98] = 76,
+ [0][0][RTW89_FCC][100] = 76,
+ [0][0][RTW89_FCC][102] = 76,
+ [0][0][RTW89_FCC][104] = 76,
+ [0][0][RTW89_FCC][105] = 76,
+ [0][0][RTW89_FCC][107] = 76,
+ [0][0][RTW89_FCC][109] = 76,
+ [0][0][RTW89_FCC][111] = 127,
+ [0][0][RTW89_FCC][113] = 127,
+ [0][0][RTW89_FCC][115] = 127,
+ [0][0][RTW89_FCC][117] = 127,
+ [0][0][RTW89_FCC][119] = 127,
+ [0][1][RTW89_FCC][0] = 76,
+ [0][1][RTW89_FCC][2] = 76,
+ [0][1][RTW89_FCC][4] = 76,
+ [0][1][RTW89_FCC][6] = 76,
+ [0][1][RTW89_FCC][8] = 76,
+ [0][1][RTW89_FCC][10] = 76,
+ [0][1][RTW89_FCC][12] = 76,
+ [0][1][RTW89_FCC][14] = 76,
+ [0][1][RTW89_FCC][15] = 76,
+ [0][1][RTW89_FCC][17] = 76,
+ [0][1][RTW89_FCC][19] = 76,
+ [0][1][RTW89_FCC][21] = 76,
+ [0][1][RTW89_FCC][23] = 76,
+ [0][1][RTW89_FCC][25] = 76,
+ [0][1][RTW89_FCC][27] = 76,
+ [0][1][RTW89_FCC][29] = 76,
+ [0][1][RTW89_FCC][30] = 76,
+ [0][1][RTW89_FCC][32] = 76,
+ [0][1][RTW89_FCC][34] = 76,
+ [0][1][RTW89_FCC][36] = 76,
+ [0][1][RTW89_FCC][38] = 76,
+ [0][1][RTW89_FCC][40] = 76,
+ [0][1][RTW89_FCC][42] = 76,
+ [0][1][RTW89_FCC][44] = 76,
+ [0][1][RTW89_FCC][45] = 76,
+ [0][1][RTW89_FCC][47] = 76,
+ [0][1][RTW89_FCC][49] = 76,
+ [0][1][RTW89_FCC][51] = 76,
+ [0][1][RTW89_FCC][53] = 76,
+ [0][1][RTW89_FCC][55] = 76,
+ [0][1][RTW89_FCC][57] = 76,
+ [0][1][RTW89_FCC][59] = 76,
+ [0][1][RTW89_FCC][60] = 76,
+ [0][1][RTW89_FCC][62] = 76,
+ [0][1][RTW89_FCC][64] = 76,
+ [0][1][RTW89_FCC][66] = 76,
+ [0][1][RTW89_FCC][68] = 76,
+ [0][1][RTW89_FCC][70] = 76,
+ [0][1][RTW89_FCC][72] = 76,
+ [0][1][RTW89_FCC][74] = 76,
+ [0][1][RTW89_FCC][75] = 76,
+ [0][1][RTW89_FCC][77] = 76,
+ [0][1][RTW89_FCC][79] = 76,
+ [0][1][RTW89_FCC][81] = 76,
+ [0][1][RTW89_FCC][83] = 76,
+ [0][1][RTW89_FCC][85] = 76,
+ [0][1][RTW89_FCC][87] = 76,
+ [0][1][RTW89_FCC][89] = 76,
+ [0][1][RTW89_FCC][90] = 76,
+ [0][1][RTW89_FCC][92] = 76,
+ [0][1][RTW89_FCC][94] = 76,
+ [0][1][RTW89_FCC][96] = 76,
+ [0][1][RTW89_FCC][98] = 76,
+ [0][1][RTW89_FCC][100] = 76,
+ [0][1][RTW89_FCC][102] = 76,
+ [0][1][RTW89_FCC][104] = 76,
+ [0][1][RTW89_FCC][105] = 76,
+ [0][1][RTW89_FCC][107] = 76,
+ [0][1][RTW89_FCC][109] = 76,
+ [0][1][RTW89_FCC][111] = 127,
+ [0][1][RTW89_FCC][113] = 127,
+ [0][1][RTW89_FCC][115] = 127,
+ [0][1][RTW89_FCC][117] = 127,
+ [0][1][RTW89_FCC][119] = 127,
+ [1][0][RTW89_FCC][0] = 76,
+ [1][0][RTW89_FCC][2] = 76,
+ [1][0][RTW89_FCC][4] = 76,
+ [1][0][RTW89_FCC][6] = 76,
+ [1][0][RTW89_FCC][8] = 76,
+ [1][0][RTW89_FCC][10] = 76,
+ [1][0][RTW89_FCC][12] = 76,
+ [1][0][RTW89_FCC][14] = 76,
+ [1][0][RTW89_FCC][15] = 76,
+ [1][0][RTW89_FCC][17] = 76,
+ [1][0][RTW89_FCC][19] = 76,
+ [1][0][RTW89_FCC][21] = 76,
+ [1][0][RTW89_FCC][23] = 76,
+ [1][0][RTW89_FCC][25] = 76,
+ [1][0][RTW89_FCC][27] = 76,
+ [1][0][RTW89_FCC][29] = 76,
+ [1][0][RTW89_FCC][30] = 76,
+ [1][0][RTW89_FCC][32] = 76,
+ [1][0][RTW89_FCC][34] = 76,
+ [1][0][RTW89_FCC][36] = 76,
+ [1][0][RTW89_FCC][38] = 76,
+ [1][0][RTW89_FCC][40] = 76,
+ [1][0][RTW89_FCC][42] = 76,
+ [1][0][RTW89_FCC][44] = 76,
+ [1][0][RTW89_FCC][45] = 76,
+ [1][0][RTW89_FCC][47] = 76,
+ [1][0][RTW89_FCC][49] = 76,
+ [1][0][RTW89_FCC][51] = 76,
+ [1][0][RTW89_FCC][53] = 76,
+ [1][0][RTW89_FCC][55] = 76,
+ [1][0][RTW89_FCC][57] = 76,
+ [1][0][RTW89_FCC][59] = 76,
+ [1][0][RTW89_FCC][60] = 76,
+ [1][0][RTW89_FCC][62] = 76,
+ [1][0][RTW89_FCC][64] = 76,
+ [1][0][RTW89_FCC][66] = 76,
+ [1][0][RTW89_FCC][68] = 76,
+ [1][0][RTW89_FCC][70] = 76,
+ [1][0][RTW89_FCC][72] = 76,
+ [1][0][RTW89_FCC][74] = 76,
+ [1][0][RTW89_FCC][75] = 76,
+ [1][0][RTW89_FCC][77] = 76,
+ [1][0][RTW89_FCC][79] = 76,
+ [1][0][RTW89_FCC][81] = 76,
+ [1][0][RTW89_FCC][83] = 76,
+ [1][0][RTW89_FCC][85] = 76,
+ [1][0][RTW89_FCC][87] = 76,
+ [1][0][RTW89_FCC][89] = 76,
+ [1][0][RTW89_FCC][90] = 76,
+ [1][0][RTW89_FCC][92] = 76,
+ [1][0][RTW89_FCC][94] = 76,
+ [1][0][RTW89_FCC][96] = 76,
+ [1][0][RTW89_FCC][98] = 76,
+ [1][0][RTW89_FCC][100] = 76,
+ [1][0][RTW89_FCC][102] = 76,
+ [1][0][RTW89_FCC][104] = 76,
+ [1][0][RTW89_FCC][105] = 76,
+ [1][0][RTW89_FCC][107] = 76,
+ [1][0][RTW89_FCC][109] = 76,
+ [1][0][RTW89_FCC][111] = 127,
+ [1][0][RTW89_FCC][113] = 127,
+ [1][0][RTW89_FCC][115] = 127,
+ [1][0][RTW89_FCC][117] = 127,
+ [1][0][RTW89_FCC][119] = 127,
+ [1][1][RTW89_FCC][0] = 76,
+ [1][1][RTW89_FCC][2] = 76,
+ [1][1][RTW89_FCC][4] = 76,
+ [1][1][RTW89_FCC][6] = 76,
+ [1][1][RTW89_FCC][8] = 76,
+ [1][1][RTW89_FCC][10] = 76,
+ [1][1][RTW89_FCC][12] = 76,
+ [1][1][RTW89_FCC][14] = 76,
+ [1][1][RTW89_FCC][15] = 76,
+ [1][1][RTW89_FCC][17] = 76,
+ [1][1][RTW89_FCC][19] = 76,
+ [1][1][RTW89_FCC][21] = 76,
+ [1][1][RTW89_FCC][23] = 76,
+ [1][1][RTW89_FCC][25] = 76,
+ [1][1][RTW89_FCC][27] = 76,
+ [1][1][RTW89_FCC][29] = 76,
+ [1][1][RTW89_FCC][30] = 76,
+ [1][1][RTW89_FCC][32] = 76,
+ [1][1][RTW89_FCC][34] = 76,
+ [1][1][RTW89_FCC][36] = 76,
+ [1][1][RTW89_FCC][38] = 76,
+ [1][1][RTW89_FCC][40] = 76,
+ [1][1][RTW89_FCC][42] = 76,
+ [1][1][RTW89_FCC][44] = 76,
+ [1][1][RTW89_FCC][45] = 76,
+ [1][1][RTW89_FCC][47] = 76,
+ [1][1][RTW89_FCC][49] = 76,
+ [1][1][RTW89_FCC][51] = 76,
+ [1][1][RTW89_FCC][53] = 76,
+ [1][1][RTW89_FCC][55] = 76,
+ [1][1][RTW89_FCC][57] = 76,
+ [1][1][RTW89_FCC][59] = 76,
+ [1][1][RTW89_FCC][60] = 76,
+ [1][1][RTW89_FCC][62] = 76,
+ [1][1][RTW89_FCC][64] = 76,
+ [1][1][RTW89_FCC][66] = 76,
+ [1][1][RTW89_FCC][68] = 76,
+ [1][1][RTW89_FCC][70] = 76,
+ [1][1][RTW89_FCC][72] = 76,
+ [1][1][RTW89_FCC][74] = 76,
+ [1][1][RTW89_FCC][75] = 76,
+ [1][1][RTW89_FCC][77] = 76,
+ [1][1][RTW89_FCC][79] = 76,
+ [1][1][RTW89_FCC][81] = 76,
+ [1][1][RTW89_FCC][83] = 76,
+ [1][1][RTW89_FCC][85] = 76,
+ [1][1][RTW89_FCC][87] = 76,
+ [1][1][RTW89_FCC][89] = 76,
+ [1][1][RTW89_FCC][90] = 76,
+ [1][1][RTW89_FCC][92] = 76,
+ [1][1][RTW89_FCC][94] = 76,
+ [1][1][RTW89_FCC][96] = 76,
+ [1][1][RTW89_FCC][98] = 76,
+ [1][1][RTW89_FCC][100] = 76,
+ [1][1][RTW89_FCC][102] = 76,
+ [1][1][RTW89_FCC][104] = 76,
+ [1][1][RTW89_FCC][105] = 76,
+ [1][1][RTW89_FCC][107] = 76,
+ [1][1][RTW89_FCC][109] = 76,
+ [1][1][RTW89_FCC][111] = 127,
+ [1][1][RTW89_FCC][113] = 127,
+ [1][1][RTW89_FCC][115] = 127,
+ [1][1][RTW89_FCC][117] = 127,
+ [1][1][RTW89_FCC][119] = 127,
+ [2][0][RTW89_FCC][0] = 76,
+ [2][0][RTW89_FCC][2] = 76,
+ [2][0][RTW89_FCC][4] = 76,
+ [2][0][RTW89_FCC][6] = 76,
+ [2][0][RTW89_FCC][8] = 76,
+ [2][0][RTW89_FCC][10] = 76,
+ [2][0][RTW89_FCC][12] = 76,
+ [2][0][RTW89_FCC][14] = 76,
+ [2][0][RTW89_FCC][15] = 76,
+ [2][0][RTW89_FCC][17] = 76,
+ [2][0][RTW89_FCC][19] = 76,
+ [2][0][RTW89_FCC][21] = 76,
+ [2][0][RTW89_FCC][23] = 76,
+ [2][0][RTW89_FCC][25] = 76,
+ [2][0][RTW89_FCC][27] = 76,
+ [2][0][RTW89_FCC][29] = 76,
+ [2][0][RTW89_FCC][30] = 76,
+ [2][0][RTW89_FCC][32] = 76,
+ [2][0][RTW89_FCC][34] = 76,
+ [2][0][RTW89_FCC][36] = 76,
+ [2][0][RTW89_FCC][38] = 76,
+ [2][0][RTW89_FCC][40] = 76,
+ [2][0][RTW89_FCC][42] = 76,
+ [2][0][RTW89_FCC][44] = 76,
+ [2][0][RTW89_FCC][45] = 76,
+ [2][0][RTW89_FCC][47] = 76,
+ [2][0][RTW89_FCC][49] = 76,
+ [2][0][RTW89_FCC][51] = 76,
+ [2][0][RTW89_FCC][53] = 76,
+ [2][0][RTW89_FCC][55] = 76,
+ [2][0][RTW89_FCC][57] = 76,
+ [2][0][RTW89_FCC][59] = 76,
+ [2][0][RTW89_FCC][60] = 76,
+ [2][0][RTW89_FCC][62] = 76,
+ [2][0][RTW89_FCC][64] = 76,
+ [2][0][RTW89_FCC][66] = 76,
+ [2][0][RTW89_FCC][68] = 76,
+ [2][0][RTW89_FCC][70] = 76,
+ [2][0][RTW89_FCC][72] = 76,
+ [2][0][RTW89_FCC][74] = 76,
+ [2][0][RTW89_FCC][75] = 76,
+ [2][0][RTW89_FCC][77] = 76,
+ [2][0][RTW89_FCC][79] = 76,
+ [2][0][RTW89_FCC][81] = 76,
+ [2][0][RTW89_FCC][83] = 76,
+ [2][0][RTW89_FCC][85] = 76,
+ [2][0][RTW89_FCC][87] = 76,
+ [2][0][RTW89_FCC][89] = 76,
+ [2][0][RTW89_FCC][90] = 76,
+ [2][0][RTW89_FCC][92] = 76,
+ [2][0][RTW89_FCC][94] = 76,
+ [2][0][RTW89_FCC][96] = 76,
+ [2][0][RTW89_FCC][98] = 76,
+ [2][0][RTW89_FCC][100] = 76,
+ [2][0][RTW89_FCC][102] = 76,
+ [2][0][RTW89_FCC][104] = 76,
+ [2][0][RTW89_FCC][105] = 76,
+ [2][0][RTW89_FCC][107] = 76,
+ [2][0][RTW89_FCC][109] = 76,
+ [2][0][RTW89_FCC][111] = 127,
+ [2][0][RTW89_FCC][113] = 127,
+ [2][0][RTW89_FCC][115] = 127,
+ [2][0][RTW89_FCC][117] = 127,
+ [2][0][RTW89_FCC][119] = 127,
+ [2][1][RTW89_FCC][0] = 76,
+ [2][1][RTW89_FCC][2] = 76,
+ [2][1][RTW89_FCC][4] = 76,
+ [2][1][RTW89_FCC][6] = 76,
+ [2][1][RTW89_FCC][8] = 76,
+ [2][1][RTW89_FCC][10] = 76,
+ [2][1][RTW89_FCC][12] = 76,
+ [2][1][RTW89_FCC][14] = 76,
+ [2][1][RTW89_FCC][15] = 76,
+ [2][1][RTW89_FCC][17] = 76,
+ [2][1][RTW89_FCC][19] = 76,
+ [2][1][RTW89_FCC][21] = 76,
+ [2][1][RTW89_FCC][23] = 76,
+ [2][1][RTW89_FCC][25] = 76,
+ [2][1][RTW89_FCC][27] = 76,
+ [2][1][RTW89_FCC][29] = 76,
+ [2][1][RTW89_FCC][30] = 76,
+ [2][1][RTW89_FCC][32] = 76,
+ [2][1][RTW89_FCC][34] = 76,
+ [2][1][RTW89_FCC][36] = 76,
+ [2][1][RTW89_FCC][38] = 76,
+ [2][1][RTW89_FCC][40] = 76,
+ [2][1][RTW89_FCC][42] = 76,
+ [2][1][RTW89_FCC][44] = 76,
+ [2][1][RTW89_FCC][45] = 76,
+ [2][1][RTW89_FCC][47] = 76,
+ [2][1][RTW89_FCC][49] = 76,
+ [2][1][RTW89_FCC][51] = 76,
+ [2][1][RTW89_FCC][53] = 76,
+ [2][1][RTW89_FCC][55] = 76,
+ [2][1][RTW89_FCC][57] = 76,
+ [2][1][RTW89_FCC][59] = 76,
+ [2][1][RTW89_FCC][60] = 76,
+ [2][1][RTW89_FCC][62] = 76,
+ [2][1][RTW89_FCC][64] = 76,
+ [2][1][RTW89_FCC][66] = 76,
+ [2][1][RTW89_FCC][68] = 76,
+ [2][1][RTW89_FCC][70] = 76,
+ [2][1][RTW89_FCC][72] = 76,
+ [2][1][RTW89_FCC][74] = 76,
+ [2][1][RTW89_FCC][75] = 76,
+ [2][1][RTW89_FCC][77] = 76,
+ [2][1][RTW89_FCC][79] = 76,
+ [2][1][RTW89_FCC][81] = 76,
+ [2][1][RTW89_FCC][83] = 76,
+ [2][1][RTW89_FCC][85] = 76,
+ [2][1][RTW89_FCC][87] = 76,
+ [2][1][RTW89_FCC][89] = 76,
+ [2][1][RTW89_FCC][90] = 76,
+ [2][1][RTW89_FCC][92] = 76,
+ [2][1][RTW89_FCC][94] = 76,
+ [2][1][RTW89_FCC][96] = 76,
+ [2][1][RTW89_FCC][98] = 76,
+ [2][1][RTW89_FCC][100] = 76,
+ [2][1][RTW89_FCC][102] = 76,
+ [2][1][RTW89_FCC][104] = 76,
+ [2][1][RTW89_FCC][105] = 76,
+ [2][1][RTW89_FCC][107] = 76,
+ [2][1][RTW89_FCC][109] = 76,
+ [2][1][RTW89_FCC][111] = 127,
+ [2][1][RTW89_FCC][113] = 127,
+ [2][1][RTW89_FCC][115] = 127,
+ [2][1][RTW89_FCC][117] = 127,
+ [2][1][RTW89_FCC][119] = 127,
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_bb_table = {
+ .regs = rtw89_8852c_phy_bb_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table = {
+ .regs = rtw89_8852c_phy_bb_reg_gain,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_reg_gain),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_radioa_table = {
+ .regs = rtw89_8852c_phy_radioa_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radioa_regs),
+ .rf_path = RF_PATH_A,
+ .config = rtw89_phy_config_rf_reg_v1,
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_radiob_table = {
+ .regs = rtw89_8852c_phy_radiob_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radiob_regs),
+ .rf_path = RF_PATH_B,
+ .config = rtw89_phy_config_rf_reg_v1,
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_nctl_table = {
+ .regs = rtw89_8852c_phy_nctl_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_nctl_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_txpwr_table rtw89_8852c_byr_table = {
+ .data = rtw89_8852c_txpwr_byrate,
+ .size = ARRAY_SIZE(rtw89_8852c_txpwr_byrate),
+ .load = rtw89_phy_load_txpwr_byrate,
+};
+
+const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg = {
+ .delta_swingidx_6gb_n = _txpwr_track_delta_swingidx_6gb_n,
+ .delta_swingidx_6gb_p = _txpwr_track_delta_swingidx_6gb_p,
+ .delta_swingidx_6ga_n = _txpwr_track_delta_swingidx_6ga_n,
+ .delta_swingidx_6ga_p = _txpwr_track_delta_swingidx_6ga_p,
+ .delta_swingidx_5gb_n = _txpwr_track_delta_swingidx_5gb_n,
+ .delta_swingidx_5gb_p = _txpwr_track_delta_swingidx_5gb_p,
+ .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n,
+ .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p,
+ .delta_swingidx_2gb_n = _txpwr_track_delta_swingidx_2gb_n,
+ .delta_swingidx_2gb_p = _txpwr_track_delta_swingidx_2gb_p,
+ .delta_swingidx_2ga_n = _txpwr_track_delta_swingidx_2ga_n,
+ .delta_swingidx_2ga_p = _txpwr_track_delta_swingidx_2ga_p,
+ .delta_swingidx_2g_cck_b_n = _txpwr_track_delta_swingidx_2g_cck_b_n,
+ .delta_swingidx_2g_cck_b_p = _txpwr_track_delta_swingidx_2g_cck_b_p,
+ .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n,
+ .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p,
+};
+
+const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table = {
+ .data[RTW89_TSSI_BANDEDGE_FLAT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .data[RTW89_TSSI_BANDEDGE_LOW] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .data[RTW89_TSSI_BANDEDGE_MID] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .data[RTW89_TSSI_BANDEDGE_HIGH] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
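Before moving on to the new header: the power-limit arrays earlier in this file are addressed by bandwidth/RU index, TX-path count, regulatory domain and channel index, matching the dimensions declared in rtw8852c_table.h just below. The sketch here is not part of the patch; reading 127 (S8_MAX) as a "no usable limit for this domain" sentinel and the RTW89_WW rows as precomputed worldwide values are assumptions rather than anything the patch states, and the helper name is made up.

static s8 example_ru_6g_limit(u8 ru_idx, u8 ntx_idx, u8 regd, u8 ch_idx)
{
	/* Direct lookup into the array defined above. */
	s8 lmt = rtw89_8852c_txpwr_lmt_ru_6g[ru_idx][ntx_idx][regd][ch_idx];

	/* Assumed fallback: use the worldwide row when this domain has
	 * no usable entry (127).
	 */
	if (lmt == 127)
		lmt = rtw89_8852c_txpwr_lmt_ru_6g[ru_idx][ntx_idx][RTW89_WW][ch_idx];

	return lmt;
}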
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h
new file mode 100644
index 000000000000..7d71a92e2d27
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_TABLE_H__
+#define __RTW89_8852C_TABLE_H__
+
+#include "core.h"
+
+extern const struct rtw89_phy_table rtw89_8852c_phy_bb_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table;
+extern const struct rtw89_txpwr_table rtw89_8852c_byr_table;
+extern const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table;
+extern const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg;
+extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
+ [RTW89_REGD_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index e71370585b4d..fc0394494013 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -9,8 +9,56 @@
#include "reg.h"
#include "rtw8852c.h"
+static const struct rtw89_pci_bd_idx_addr rtw8852c_bd_idx_addr_low_power = {
+ .tx_bd_addrs = {R_AX_DRV_FW_HSK_0, R_AX_DRV_FW_HSK_1, R_AX_DRV_FW_HSK_2,
+ R_AX_DRV_FW_HSK_3, 0, 0,
+ 0, 0, R_AX_DRV_FW_HSK_4,
+ 0, 0, 0,
+ R_AX_DRV_FW_HSK_5},
+ .rx_bd_addrs = {R_AX_DRV_FW_HSK_6, R_AX_DRV_FW_HSK_7},
+};
+
static const struct rtw89_pci_info rtw8852c_pci_info = {
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_V1_256B,
+ .rx_burst = MAC_AX_RX_BURST_V1_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_ENABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+
+ .init_cfg_reg = R_AX_HAXI_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN_V1,
+ .rxhci_en_bit = B_AX_RXHCI_EN_V1,
+ .rxbd_mode_bit = B_AX_RXBD_MODE_V1,
+ .exp_ctrl_reg = R_AX_HAXI_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM_V1_MASK,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR_V1,
+ .txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2_V1,
+ .dma_stop1_reg = R_AX_HAXI_DMA_STOP1,
+ .dma_stop2_reg = R_AX_HAXI_DMA_STOP2,
+ .dma_busy1_reg = R_AX_HAXI_DMA_BUSY1,
+ .dma_busy2_reg = R_AX_HAXI_DMA_BUSY2,
+ .dma_busy3_reg = R_AX_HAXI_DMA_BUSY3,
+
+ .rpwm_addr = R_AX_PCIE_HRPWM_V1,
+ .cpwm_addr = R_AX_PCIE_CRPWM,
+ .bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
+
+ .ltr_set = rtw89_pci_ltr_set_v1,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
+ .config_intr_mask = rtw89_pci_config_intr_mask_v1,
+ .enable_intr = rtw89_pci_enable_intr_v1,
+ .disable_intr = rtw89_pci_disable_intr_v1,
+ .recognize_intrs = rtw89_pci_recognize_intrs_v1,
};
static const struct rtw89_driver_info rtw89_8852ce_info = {
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 837cdc366a61..9e95ed972710 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -2,10 +2,14 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/devcoredump.h>
+
#include "cam.h"
#include "debug.h"
+#include "fw.h"
#include "mac.h"
#include "ps.h"
+#include "reg.h"
#include "ser.h"
#include "util.h"
@@ -67,6 +71,80 @@ static char *ser_st_name(struct rtw89_ser *ser)
return "err_st_name";
}
+#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
+struct ser_cd_ ## _name { \
+ u32 type; \
+ u32 type_size; \
+ u64 padding; \
+ u8 data[_size]; \
+} __packed; \
+static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
+{ \
+ p->type = _type; \
+ p->type_size = sizeof(p->data); \
+ p->padding = 0x0123456789abcdef; \
+}
+
+enum rtw89_ser_cd_type {
+ RTW89_SER_CD_FW_RSVD_PLE = 0,
+ RTW89_SER_CD_FW_BACKTRACE = 1,
+};
+
+RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
+ RTW89_SER_CD_FW_RSVD_PLE,
+ RTW89_FW_RSVD_PLE_SIZE);
+
+RTW89_DEF_SER_CD_TYPE(fw_backtrace,
+ RTW89_SER_CD_FW_BACKTRACE,
+ RTW89_FW_BACKTRACE_MAX_SIZE);
+
+struct rtw89_ser_cd_buffer {
+ struct ser_cd_fw_rsvd_ple fwple;
+ struct ser_cd_fw_backtrace fwbt;
+} __packed;
+
+static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_ser_cd_buffer *buf;
+
+ buf = vzalloc(sizeof(*buf));
+ if (!buf)
+ return NULL;
+
+ ser_cd_fw_rsvd_ple_init(&buf->fwple);
+ ser_cd_fw_backtrace_init(&buf->fwbt);
+
+ return buf;
+}
+
+static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
+ struct rtw89_ser_cd_buffer *buf)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");
+
+ /* After calling dev_coredump, buf's lifetime is supposed to be
+ * handled by the device coredump framework. Note that a new dump
+ * will be discarded if a previous one hasn't been released by the
+ * framework yet.
+ */
+ dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
+}
+
+static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
+ struct rtw89_ser_cd_buffer *buf, bool free_self)
+{
+ if (!free_self)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");
+
+ /* If something goes wrong while filling the core dump data, we
+ * won't send it to the device coredump framework. Instead, we
+ * free buf ourselves.
+ */
+ vfree(buf);
+}
+
static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
@@ -220,11 +298,32 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtwvif->trigger = false;
}
+static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+}
+
+static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ ser_sta_deinit_addr_cam_iter,
+ rtwdev);
+
+ rtw89_cam_deinit(rtwdev, rtwvif);
+}
+
static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
struct rtw89_vif *rtwvif;
rtw89_cam_reset_keys(rtwdev);
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ ser_deinit_cam(rtwdev, rtwvif);
+
rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
rtw89_for_each_rtwvif(rtwdev, rtwvif)
ser_reset_vif(rtwdev, rtwvif);
@@ -281,8 +380,11 @@ static void hal_send_m4_event(struct rtw89_ser *ser)
/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
switch (evt) {
case SER_EV_STATE_IN:
+ rtw89_hci_recovery_complete(rtwdev);
break;
case SER_EV_L1_RESET:
ser_state_goto(ser, SER_RESET_TRX_ST);
@@ -291,6 +393,8 @@ static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
ser_state_goto(ser, SER_L2_RESET_ST);
break;
case SER_EV_STATE_OUT:
+ rtw89_hci_recovery_start(rtwdev);
+ break;
default:
break;
}
@@ -365,6 +469,138 @@ static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
}
}
+static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
+ u8 sel, u32 start_addr, u32 len)
+{
+ u32 *ptr = (u32 *)buf;
+ u32 base_addr, start_page, residue;
+ u32 cnt = 0;
+ u32 i;
+
+ start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
+ residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
+ base_addr = rtw89_mac_mem_base_addrs[sel];
+ base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
+
+ while (cnt < len) {
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);
+
+ for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
+ i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
+ i += 4, ptr++) {
+ *ptr = rtw89_read32(rtwdev, i);
+ cnt += 4;
+ if (cnt >= len)
+ break;
+ }
+
+ residue = 0;
+ base_addr += MAC_MEM_DUMP_PAGE_SIZE;
+ }
+}
+
+static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
+{
+ u32 start_addr = rtwdev->chip->rsvd_ple_ofst;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER,
+ "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
+ start_addr);
+ ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
+ RTW89_FW_RSVD_PLE_SIZE);
+}
+
+struct __fw_backtrace_entry {
+ u32 wcpu_addr;
+ u32 size;
+ u32 key;
+} __packed;
+
+struct __fw_backtrace_info {
+ u32 ra;
+ u32 sp;
+} __packed;
+
+static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
+ sizeof(struct __fw_backtrace_info));
+
+static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
+ const struct __fw_backtrace_entry *ent)
+{
+ struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
+ u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR;
+ u32 fwbt_size = ent->size;
+ u32 fwbt_key = ent->key;
+ u32 i;
+
+ if (fwbt_addr == 0) {
+ rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
+ fwbt_addr);
+ return -EINVAL;
+ }
+
+ if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
+ rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
+ fwbt_key);
+ return -EINVAL;
+ }
+
+ if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
+ fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
+ rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
+ fwbt_size);
+ return -EINVAL;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);
+
+ for (i = R_AX_INDIR_ACCESS_ENTRY;
+ i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
+ i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
+ *ptr = (struct __fw_backtrace_info){
+ .ra = rtw89_read32(rtwdev, i),
+ .sp = rtw89_read32(rtwdev, i + 4),
+ };
+ rtw89_debug(rtwdev, RTW89_DBG_SER,
+ "next sp: 0x%x, next ra: 0x%x\n",
+ ptr->sp, ptr->ra);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
+ return 0;
+}
+
+static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ struct rtw89_ser_cd_buffer *buf;
+ struct __fw_backtrace_entry fwbt_ent;
+ int ret = 0;
+
+ buf = rtw89_ser_cd_prep(rtwdev);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto bottom;
+ }
+
+ rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);
+
+ fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
+ ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
+ if (ret)
+ goto bottom;
+
+ rtw89_ser_cd_send(rtwdev, buf);
+
+bottom:
+ rtw89_ser_cd_free(rtwdev, buf, !!ret);
+
+ ser_reset_mac_binding(rtwdev);
+ rtw89_core_stop(rtwdev);
+ INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
+}
+
static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
@@ -372,8 +608,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) {
case SER_EV_STATE_IN:
mutex_lock(&rtwdev->mutex);
- ser_reset_mac_binding(rtwdev);
- rtw89_core_stop(rtwdev);
+ ser_l2_reset_st_pre_hdl(ser);
mutex_unlock(&rtwdev->mutex);
ieee80211_restart_hw(rtwdev->hw);
@@ -385,6 +620,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
fallthrough;
case SER_EV_L2_RECFG_DONE:
ser_state_goto(ser, SER_IDLE_ST);
+ clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
break;
case SER_EV_STATE_OUT:
@@ -396,7 +632,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
}
}
-static struct event_ent ser_ev_tbl[] = {
+static const struct event_ent ser_ev_tbl[] = {
{SER_EV_NONE, "SER_EV_NONE"},
{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
@@ -412,7 +648,7 @@ static struct event_ent ser_ev_tbl[] = {
{SER_EV_MAXX, "SER_EV_MAX"}
};
-static struct state_ent ser_st_tbl[] = {
+static const struct state_ent ser_st_tbl[] = {
{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
@@ -456,7 +692,7 @@ int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
u8 event = SER_EV_NONE;
- rtw89_info(rtwdev, "ser event = 0x%04x\n", err);
+ rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);
switch (err) {
case MAC_AX_ERR_L1_ERR_DMAC:
@@ -482,8 +718,10 @@ int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
break;
}
- if (event == SER_EV_NONE)
+ if (event == SER_EV_NONE) {
+ rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
return -EINVAL;
+ }
ser_send_msg(&rtwdev->ser, event);
return 0;
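The L2 reset pre-handler added above fills a packed rtw89_ser_cd_buffer (FW reserved PLE section followed by FW backtrace section) and hands it to dev_coredumpv(), so the blob later retrieved through devcoredump is simply the two sections back to back, each starting with the header laid down by RTW89_DEF_SER_CD_TYPE: a u32 type, a u32 type_size, a u64 0x0123456789abcdef marker, then type_size bytes of data. Below is a minimal user-space parsing sketch under those assumptions (host byte order, layout as defined by the macro); it is illustrative and not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the per-section header written by RTW89_DEF_SER_CD_TYPE. */
struct ser_cd_hdr {
	uint32_t type;       /* 0 = FW reserved PLE, 1 = FW backtrace */
	uint32_t type_size;  /* number of payload bytes that follow */
	uint64_t padding;    /* expected to read 0x0123456789abcdef */
} __attribute__((packed));

/* Returns the number of bytes consumed, or 0 if the buffer is too short. */
static size_t dump_one_section(const uint8_t *buf, size_t len)
{
	struct ser_cd_hdr hdr;

	if (len < sizeof(hdr))
		return 0;
	memcpy(&hdr, buf, sizeof(hdr));
	if (len - sizeof(hdr) < hdr.type_size)
		return 0;
	printf("section type %u, %u data bytes\n",
	       (unsigned int)hdr.type, (unsigned int)hdr.type_size);
	return sizeof(hdr) + hdr.type_size;
}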
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 86e3d8b400d6..b889e7bf34c0 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -24,6 +24,7 @@
/* TX WD BODY DWORD 0 */
#define RTW89_TXWD_BODY0_WP_OFFSET GENMASK(31, 24)
+#define RTW89_TXWD_BODY0_WP_OFFSET_V1 GENMASK(28, 24)
#define RTW89_TXWD_BODY0_MORE_DATA BIT(23)
#define RTW89_TXWD_BODY0_WD_INFO_EN BIT(22)
#define RTW89_TXWD_BODY0_FW_DL BIT(20)
@@ -35,7 +36,10 @@
#define RTW89_TXWD_BODY0_HW_SSN_MODE GENMASK(1, 0)
/* TX WD BODY DWORD 1 */
+#define RTW89_TXWD_BODY1_ADDR_INFO_NUM GENMASK(31, 26)
#define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16)
+#define RTW89_TXWD_BODY1_SEC_KEYID GENMASK(5, 4)
+#define RTW89_TXWD_BODY1_SEC_TYPE GENMASK(3, 0)
/* TX WD BODY DWORD 2 */
#define RTW89_TXWD_BODY2_MACID GENMASK(30, 24)
@@ -49,8 +53,22 @@
#define RTW89_TXWD_BODY3_SW_SEQ GENMASK(11, 0)
/* TX WD BODY DWORD 4 */
+#define RTW89_TXWD_BODY4_SEC_IV_L1 GENMASK(31, 24)
+#define RTW89_TXWD_BODY4_SEC_IV_L0 GENMASK(23, 16)
/* TX WD BODY DWORD 5 */
+#define RTW89_TXWD_BODY5_SEC_IV_H5 GENMASK(31, 24)
+#define RTW89_TXWD_BODY5_SEC_IV_H4 GENMASK(23, 16)
+#define RTW89_TXWD_BODY5_SEC_IV_H3 GENMASK(15, 8)
+#define RTW89_TXWD_BODY5_SEC_IV_H2 GENMASK(7, 0)
+
+/* TX WD BODY DWORD 6 (V1) */
+
+/* TX WD BODY DWORD 7 (V1) */
+#define RTW89_TXWD_BODY7_USE_RATE_V1 BIT(31)
+#define RTW89_TXWD_BODY7_DATA_BW GENMASK(29, 28)
+#define RTW89_TXWD_BODY7_GI_LTF GENMASK(27, 25)
+#define RTW89_TXWD_BODY7_DATA_RATE GENMASK(24, 16)
/* TX WD INFO DWORD 0 */
#define RTW89_TXWD_INFO0_USE_RATE BIT(30)
@@ -69,6 +87,7 @@
#define RTW89_TXWD_INFO2_AMPDU_DENSITY GENMASK(20, 18)
#define RTW89_TXWD_INFO2_SEC_TYPE GENMASK(12, 9)
#define RTW89_TXWD_INFO2_SEC_HW_ENC BIT(8)
+#define RTW89_TXWD_INFO2_FORCE_KEY_EN BIT(8)
#define RTW89_TXWD_INFO2_SEC_CAM_IDX GENMASK(7, 0)
/* TX WD INFO DWORD 3 */
@@ -79,6 +98,92 @@
/* TX WD INFO DWORD 5 */
+/* RX WD dword0 */
+#define AX_RXD_RPKT_LEN_MASK GENMASK(13, 0)
+#define AX_RXD_SHIFT_MASK GENMASK(15, 14)
+#define AX_RXD_WL_HD_IV_LEN_MASK GENMASK(21, 16)
+#define AX_RXD_BB_SEL BIT(22)
+#define AX_RXD_MAC_INFO_VLD BIT(23)
+#define AX_RXD_RPKT_TYPE_MASK GENMASK(27, 24)
+#define AX_RXD_DRV_INFO_SIZE_MASK GENMASK(30, 28)
+#define AX_RXD_LONG_RXD BIT(31)
+
+/* RX WD dword1 */
+#define AX_RXD_PPDU_TYPE_MASK GENMASK(3, 0)
+#define AX_RXD_PPDU_CNT_MASK GENMASK(6, 4)
+#define AX_RXD_SR_EN BIT(7)
+#define AX_RXD_USER_ID_MASK GENMASK(15, 8)
+#define AX_RXD_USER_ID_v1_MASK GENMASK(13, 8)
+#define AX_RXD_RX_DATARATE_MASK GENMASK(24, 16)
+#define AX_RXD_RX_GI_LTF_MASK GENMASK(27, 25)
+#define AX_RXD_NON_SRG_PPDU BIT(28)
+#define AX_RXD_INTER_PPDU BIT(29)
+#define AX_RXD_NON_SRG_PPDU_v1 BIT(14)
+#define AX_RXD_INTER_PPDU_v1 BIT(15)
+#define AX_RXD_BW_MASK GENMASK(31, 30)
+#define AX_RXD_BW_v1_MASK GENMASK(31, 29)
+
+/* RX WD dword2 */
+#define AX_RXD_FREERUN_CNT_MASK GENMASK(31, 0)
+
+/* RX WD dword3 */
+#define AX_RXD_A1_MATCH BIT(0)
+#define AX_RXD_SW_DEC BIT(1)
+#define AX_RXD_HW_DEC BIT(2)
+#define AX_RXD_AMPDU BIT(3)
+#define AX_RXD_AMPDU_END_PKT BIT(4)
+#define AX_RXD_AMSDU BIT(5)
+#define AX_RXD_AMSDU_CUT BIT(6)
+#define AX_RXD_LAST_MSDU BIT(7)
+#define AX_RXD_BYPASS BIT(8)
+#define AX_RXD_CRC32_ERR BIT(9)
+#define AX_RXD_ICV_ERR BIT(10)
+#define AX_RXD_MAGIC_WAKE BIT(11)
+#define AX_RXD_UNICAST_WAKE BIT(12)
+#define AX_RXD_PATTERN_WAKE BIT(13)
+#define AX_RXD_GET_CH_INFO_MASK GENMASK(15, 14)
+#define AX_RXD_PATTERN_IDX_MASK GENMASK(20, 16)
+#define AX_RXD_TARGET_IDC_MASK GENMASK(23, 21)
+#define AX_RXD_CHKSUM_OFFLOAD_EN BIT(24)
+#define AX_RXD_WITH_LLC BIT(25)
+#define AX_RXD_RX_STATISTICS BIT(26)
+
+/* RX WD dword4 */
+#define AX_RXD_TYPE_MASK GENMASK(1, 0)
+#define AX_RXD_MC BIT(2)
+#define AX_RXD_BC BIT(3)
+#define AX_RXD_MD BIT(4)
+#define AX_RXD_MF BIT(5)
+#define AX_RXD_PWR BIT(6)
+#define AX_RXD_QOS BIT(7)
+#define AX_RXD_TID_MASK GENMASK(11, 8)
+#define AX_RXD_EOSP BIT(12)
+#define AX_RXD_HTC BIT(13)
+#define AX_RXD_QNULL BIT(14)
+#define AX_RXD_SEQ_MASK GENMASK(27, 16)
+#define AX_RXD_FRAG_MASK GENMASK(31, 28)
+
+/* RX WD dword5 */
+#define AX_RXD_SEC_CAM_IDX_MASK GENMASK(7, 0)
+#define AX_RXD_ADDR_CAM_MASK GENMASK(15, 8)
+#define AX_RXD_MAC_ID_MASK GENMASK(23, 16)
+#define AX_RXD_RX_PL_ID_MASK GENMASK(27, 24)
+#define AX_RXD_ADDR_CAM_VLD BIT(28)
+#define AX_RXD_ADDR_FWD_EN BIT(29)
+#define AX_RXD_RX_PL_MATCH BIT(30)
+
+/* RX WD dword6 */
+#define AX_RXD_MAC_ADDR_MASK GENMASK(31, 0)
+
+/* RX WD dword7 */
+#define AX_RXD_MAC_ADDR_H_MASK GENMASK(15, 0)
+#define AX_RXD_SMART_ANT BIT(16)
+#define AX_RXD_SEC_TYPE_MASK GENMASK(20, 17)
+#define AX_RXD_HDR_CNV BIT(21)
+#define AX_RXD_HDR_OFFSET_MASK GENMASK(26, 22)
+#define AX_RXD_BIP_KEYID BIT(27)
+#define AX_RXD_BIP_ENC BIT(28)
+
/* RX DESC helpers */
/* Short Descriptor */
#define RTW89_GET_RXWD_LONG_RXD(rxdesc) \
@@ -99,6 +204,8 @@
le32_get_bits((rxdesc)->dword0, GENMASK(13, 0))
#define RTW89_GET_RXWD_BW(rxdesc) \
le32_get_bits((rxdesc)->dword1, GENMASK(31, 30))
+#define RTW89_GET_RXWD_BW_V1(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(31, 29))
#define RTW89_GET_RXWD_GI_LTF(rxdesc) \
le32_get_bits((rxdesc)->dword1, GENMASK(27, 25))
#define RTW89_GET_RXWD_DATA_RATE(rxdesc) \
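The AX_RXD_* masks added above describe the eight-dword RX descriptor and are consumed with le32_get_bits(). As a minimal sketch only (the struct view and helper below are hypothetical; the real driver has its own descriptor types and parsing path), extracting a few of those fields could look like this, with the masks repeated so the snippet stands alone:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Masks copied from the hunk above (rtw89 txrx.h) so the sketch is self-contained. */
#define AX_RXD_RPKT_LEN_MASK	GENMASK(13, 0)
#define AX_RXD_RPKT_TYPE_MASK	GENMASK(27, 24)
#define AX_RXD_CRC32_ERR	BIT(9)

struct rxd_sketch {
	__le32 dword[8];	/* hypothetical view of the descriptor */
};

static void rxd_parse_sketch(const struct rxd_sketch *rxd,
			     u16 *pkt_len, u8 *rpkt_type, bool *crc_err)
{
	/* dword0: packet length and report type */
	*pkt_len = le32_get_bits(rxd->dword[0], AX_RXD_RPKT_LEN_MASK);
	*rpkt_type = le32_get_bits(rxd->dword[0], AX_RXD_RPKT_TYPE_MASK);
	/* dword3: per-packet status bits */
	*crc_err = le32_get_bits(rxd->dword[3], AX_RXD_CRC32_ERR);
}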
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index 229e81009de6..1ae80b7561da 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -14,4 +14,34 @@
#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
+/* C integer division with a negative dividend and a positive divisor rounds
+ * toward zero, which for negative results behaves like round-up. Adjust the
+ * quotient so the result is always rounded down.
+ * Note: the maximum value of the divisor is 0x7FFF_FFFF, because it is cast
+ * to a signed value so the compiler emits a signed divide instruction.
+ */
+static inline s32 s32_div_u32_round_down(s32 dividend, u32 divisor, s32 *remainder)
+{
+ s32 i_divisor = (s32)divisor;
+ s32 i_remainder;
+ s32 quotient;
+
+ quotient = dividend / i_divisor;
+ i_remainder = dividend % i_divisor;
+
+ if (i_remainder < 0) {
+ quotient--;
+ i_remainder += i_divisor;
+ }
+
+ if (remainder)
+ *remainder = i_remainder;
+ return quotient;
+}
+
+static inline s32 s32_div_u32_round_closest(s32 dividend, u32 divisor)
+{
+ return s32_div_u32_round_down(dividend + divisor / 2, divisor, NULL);
+}
+
#endif
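The helper above compensates for C's truncating division, which rounds negative quotients toward zero (effectively upward). A minimal plain-C sketch of the same arithmetic, runnable outside the kernel, shows the intended rounding:

#include <assert.h>
#include <stdint.h>

/* Same rounding rule as s32_div_u32_round_down(), in plain C for illustration. */
static int32_t div_round_down(int32_t dividend, uint32_t divisor, int32_t *remainder)
{
	int32_t d = (int32_t)divisor;
	int32_t q = dividend / d;
	int32_t r = dividend % d;

	if (r < 0) {		/* truncation rounded toward zero; step down */
		q--;
		r += d;
	}
	if (remainder)
		*remainder = r;
	return q;
}

int main(void)
{
	int32_t r;

	assert(div_round_down(-7, 4, &r) == -2 && r == 1);	/* plain C gives -1, rem -3 */
	assert(div_round_down(7, 4, &r) == 1 && r == 3);
	return 0;
}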
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 913e11fb3807..f01e82b90c07 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1490,13 +1490,13 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) {
- common->bitrate_mask[common->band] = sta->supp_rates[common->band];
- common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
- if (sta->ht_cap.ht_supported) {
+ common->bitrate_mask[common->band] = sta->deflink.supp_rates[common->band];
+ common->vif_info[0].is_ht = sta->deflink.ht_cap.ht_supported;
+ if (sta->deflink.ht_cap.ht_supported) {
common->bitrate_mask[NL80211_BAND_2GHZ] =
- sta->supp_rates[NL80211_BAND_2GHZ];
- if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ sta->deflink.supp_rates[NL80211_BAND_2GHZ];
+ if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
common->vif_info[0].sgi = true;
ieee80211_start_tx_ba_session(sta, 0, 0);
}
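This hunk, like the cw1200, wfx and wlcore changes further down, moves per-station rate and HT-capability reads from struct ieee80211_sta to its deflink member, the default link's data, as mac80211 prepares for multi-link (MLO) stations. A hedged sketch of the access pattern (the helper name is invented; the mac80211 types and flags are real):

#include <net/mac80211.h>

/* Hypothetical helper: per-link station data (rates, HT caps) now lives in
 * sta->deflink, the default link, instead of directly in ieee80211_sta.
 */
static bool sta_sgi_capable(struct ieee80211_sta *sta, enum nl80211_band band,
			    u32 *rates)
{
	*rates = sta->deflink.supp_rates[band];

	if (!sta->deflink.ht_cap.ht_supported)
		return false;

	return sta->deflink.ht_cap.cap &
	       (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40);
}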
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 0848f7a7e76c..c14689266fec 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1357,10 +1357,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
is_ht = common->vif_info[0].is_ht;
is_sgi = common->vif_info[0].sgi;
} else {
- rate_bitmap = sta->supp_rates[band];
- is_ht = sta->ht_cap.ht_supported;
- if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ rate_bitmap = sta->deflink.supp_rates[band];
+ is_ht = sta->deflink.ht_cap.ht_supported;
+ if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
is_sgi = true;
}
diff --git a/drivers/net/wireless/silabs/Kconfig b/drivers/net/wireless/silabs/Kconfig
new file mode 100644
index 000000000000..6262a799bf36
--- /dev/null
+++ b/drivers/net/wireless/silabs/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config WLAN_VENDOR_SILABS
+ bool "Silicon Laboratories devices"
+ default y
+ help
+ If you have a wireless card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_SILABS
+
+source "drivers/net/wireless/silabs/wfx/Kconfig"
+
+endif # WLAN_VENDOR_SILABS
diff --git a/drivers/net/wireless/silabs/Makefile b/drivers/net/wireless/silabs/Makefile
new file mode 100644
index 000000000000..c2263ee21006
--- /dev/null
+++ b/drivers/net/wireless/silabs/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/staging/wfx/Kconfig b/drivers/net/wireless/silabs/wfx/Kconfig
index 835a855409d8..835a855409d8 100644
--- a/drivers/staging/wfx/Kconfig
+++ b/drivers/net/wireless/silabs/wfx/Kconfig
diff --git a/drivers/staging/wfx/Makefile b/drivers/net/wireless/silabs/wfx/Makefile
index c8b356f71c99..c8b356f71c99 100644
--- a/drivers/staging/wfx/Makefile
+++ b/drivers/net/wireless/silabs/wfx/Makefile
diff --git a/drivers/staging/wfx/bh.c b/drivers/net/wireless/silabs/wfx/bh.c
index bcea9d5b119c..bcea9d5b119c 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/net/wireless/silabs/wfx/bh.c
diff --git a/drivers/staging/wfx/bh.h b/drivers/net/wireless/silabs/wfx/bh.h
index a44c8b421b7c..a44c8b421b7c 100644
--- a/drivers/staging/wfx/bh.h
+++ b/drivers/net/wireless/silabs/wfx/bh.h
diff --git a/drivers/staging/wfx/bus.h b/drivers/net/wireless/silabs/wfx/bus.h
index ccadfdd6873c..ccadfdd6873c 100644
--- a/drivers/staging/wfx/bus.h
+++ b/drivers/net/wireless/silabs/wfx/bus.h
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c
index 51a0d58a9070..51a0d58a9070 100644
--- a/drivers/staging/wfx/bus_sdio.c
+++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/net/wireless/silabs/wfx/bus_spi.c
index 7fb1afb8ed31..7fb1afb8ed31 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/net/wireless/silabs/wfx/bus_spi.c
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/net/wireless/silabs/wfx/data_rx.c
index a4b5ffe158e4..a4b5ffe158e4 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/net/wireless/silabs/wfx/data_rx.c
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/net/wireless/silabs/wfx/data_rx.h
index cf708f16d602..cf708f16d602 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/net/wireless/silabs/wfx/data_rx.h
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
index e07381b2ff4d..e07381b2ff4d 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/net/wireless/silabs/wfx/data_tx.c
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/net/wireless/silabs/wfx/data_tx.h
index 983470705e4b..983470705e4b 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/net/wireless/silabs/wfx/data_tx.h
diff --git a/drivers/staging/wfx/debug.c b/drivers/net/wireless/silabs/wfx/debug.c
index e8265208f9a5..e8265208f9a5 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/net/wireless/silabs/wfx/debug.c
diff --git a/drivers/staging/wfx/debug.h b/drivers/net/wireless/silabs/wfx/debug.h
index 3840575e5e28..3840575e5e28 100644
--- a/drivers/staging/wfx/debug.h
+++ b/drivers/net/wireless/silabs/wfx/debug.h
diff --git a/drivers/staging/wfx/fwio.c b/drivers/net/wireless/silabs/wfx/fwio.c
index 3d1b8a135dc0..3d1b8a135dc0 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/net/wireless/silabs/wfx/fwio.c
diff --git a/drivers/staging/wfx/fwio.h b/drivers/net/wireless/silabs/wfx/fwio.h
index eeea61210eca..eeea61210eca 100644
--- a/drivers/staging/wfx/fwio.h
+++ b/drivers/net/wireless/silabs/wfx/fwio.h
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h
index 8b91b1d4a46b..8b91b1d4a46b 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/net/wireless/silabs/wfx/hif_api_general.h
index 4d400fdc2252..4d400fdc2252 100644
--- a/drivers/staging/wfx/hif_api_general.h
+++ b/drivers/net/wireless/silabs/wfx/hif_api_general.h
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/net/wireless/silabs/wfx/hif_api_mib.h
index 7b68b83866c9..7b68b83866c9 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/net/wireless/silabs/wfx/hif_api_mib.h
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/net/wireless/silabs/wfx/hif_rx.c
index 64ca8acb8e4f..64ca8acb8e4f 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/net/wireless/silabs/wfx/hif_rx.c
diff --git a/drivers/staging/wfx/hif_rx.h b/drivers/net/wireless/silabs/wfx/hif_rx.h
index 96543b81fa77..96543b81fa77 100644
--- a/drivers/staging/wfx/hif_rx.h
+++ b/drivers/net/wireless/silabs/wfx/hif_rx.h
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c
index ae3cc5919dcd..ae3cc5919dcd 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.c
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/net/wireless/silabs/wfx/hif_tx.h
index 71817a6571f0..71817a6571f0 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.h
diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c
index df1bcb1e2c02..df1bcb1e2c02 100644
--- a/drivers/staging/wfx/hif_tx_mib.c
+++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h
index bcd4ef6a8497..bcd4ef6a8497 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h
diff --git a/drivers/staging/wfx/hwio.c b/drivers/net/wireless/silabs/wfx/hwio.c
index 3f9750b470be..3f9750b470be 100644
--- a/drivers/staging/wfx/hwio.c
+++ b/drivers/net/wireless/silabs/wfx/hwio.c
diff --git a/drivers/staging/wfx/hwio.h b/drivers/net/wireless/silabs/wfx/hwio.h
index c6e7b065b7ff..c6e7b065b7ff 100644
--- a/drivers/staging/wfx/hwio.h
+++ b/drivers/net/wireless/silabs/wfx/hwio.h
diff --git a/drivers/staging/wfx/key.c b/drivers/net/wireless/silabs/wfx/key.c
index 8f23e8d42bd4..8f23e8d42bd4 100644
--- a/drivers/staging/wfx/key.c
+++ b/drivers/net/wireless/silabs/wfx/key.c
diff --git a/drivers/staging/wfx/key.h b/drivers/net/wireless/silabs/wfx/key.h
index 2234e36dbbcd..2234e36dbbcd 100644
--- a/drivers/staging/wfx/key.h
+++ b/drivers/net/wireless/silabs/wfx/key.h
diff --git a/drivers/staging/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index b93b16b900c8..b93b16b900c8 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
diff --git a/drivers/staging/wfx/main.h b/drivers/net/wireless/silabs/wfx/main.h
index 68c665307153..68c665307153 100644
--- a/drivers/staging/wfx/main.h
+++ b/drivers/net/wireless/silabs/wfx/main.h
diff --git a/drivers/staging/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c
index 729825230db2..729825230db2 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/net/wireless/silabs/wfx/queue.c
diff --git a/drivers/staging/wfx/queue.h b/drivers/net/wireless/silabs/wfx/queue.h
index 4731debca93d..4731debca93d 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/net/wireless/silabs/wfx/queue.h
diff --git a/drivers/staging/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c
index 7f34f0d322f9..7f34f0d322f9 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/net/wireless/silabs/wfx/scan.c
diff --git a/drivers/staging/wfx/scan.h b/drivers/net/wireless/silabs/wfx/scan.h
index 78e3b984f375..78e3b984f375 100644
--- a/drivers/staging/wfx/scan.h
+++ b/drivers/net/wireless/silabs/wfx/scan.h
diff --git a/drivers/staging/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index b1e9fb14d2b4..3297d73c327a 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -441,11 +441,11 @@ static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *i
rcu_read_lock(); /* protect sta */
if (info->bssid && !info->ibss_joined)
sta = ieee80211_find_sta(wvif->vif, info->bssid);
- if (sta && sta->ht_cap.ht_supported)
- ampdu_density = sta->ht_cap.ampdu_density;
- if (sta && sta->ht_cap.ht_supported &&
+ if (sta && sta->deflink.ht_cap.ht_supported)
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ if (sta && sta->deflink.ht_cap.ht_supported &&
!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
- greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ greenfield = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
rcu_read_unlock();
wvif->join_in_progress = false;
diff --git a/drivers/staging/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
index c69b2227e9ac..c69b2227e9ac 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/net/wireless/silabs/wfx/sta.h
diff --git a/drivers/staging/wfx/traces.h b/drivers/net/wireless/silabs/wfx/traces.h
index e011e8a46bd5..e011e8a46bd5 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/net/wireless/silabs/wfx/traces.h
diff --git a/drivers/staging/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h
index 6594cc647c2f..6594cc647c2f 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/net/wireless/silabs/wfx/wfx.h
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 236022d4ae2a..321df124d449 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1907,10 +1907,10 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
if (info->bssid && !info->ibss_joined)
sta = ieee80211_find_sta(vif, info->bssid);
if (sta) {
- priv->ht_info.ht_cap = sta->ht_cap;
+ priv->ht_info.ht_cap = sta->deflink.ht_cap;
priv->bss_params.operational_rate_set =
cw1200_rate_mask_to_wsm(priv,
- sta->supp_rates[priv->channel->band]);
+ sta->deflink.supp_rates[priv->channel->band]);
priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef);
priv->ht_info.operation_mode = info->ht_operation_mode;
} else {
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 2f921a44f1e2..80fbf740fe6d 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -264,11 +264,9 @@ static ssize_t radar_detection_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl18xx_cmd_radar_detection_debug(wl, channel);
if (ret < 0)
@@ -306,11 +304,9 @@ static ssize_t dynamic_fw_traces_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl18xx_acx_dynamic_fw_traces(wl);
if (ret < 0)
@@ -368,11 +364,9 @@ static ssize_t radar_debug_mode_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wlcore_cmd_generic_cfg(wl, wlvif,
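This hunk and the wlcore hunks that follow all replace the pm_runtime_get_sync() plus pm_runtime_put_noidle() error handling with pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails. A sketch of the resulting calling pattern (the function below is hypothetical; the runtime-PM API calls are real):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* What the conversion relies on: pm_runtime_resume_and_get() behaves like
 * pm_runtime_get_sync() but releases the usage count itself on failure, so
 * the caller no longer needs pm_runtime_put_noidle() in the error path.
 */
static int wl_wakeup_sketch(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;		/* usage count already released */

	/* ... issue firmware commands while the device is awake ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}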
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 8b798b5fcaf5..df6029ef6304 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -178,11 +178,9 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto free_vector;
- }
do {
if (time_after(jiffies, timeout_time)) {
@@ -1558,11 +1556,11 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
WL1271_PSD_LEGACY;
- sta_rates = sta->supp_rates[wlvif->band];
- if (sta->ht_cap.ht_supported)
+ sta_rates = sta->deflink.supp_rates[wlvif->band];
+ if (sta->deflink.ht_cap.ht_supported)
sta_rates |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
- (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index cce8d75d8b81..eb3d3f0e0b4d 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -52,11 +52,9 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (!wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
@@ -108,12 +106,9 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value,
return;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
-
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
return;
- }
chip_op = arg;
chip_op(wl);
@@ -279,11 +274,9 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* In case we're already in PSM, trigger it again to set new timeout
* immediately without waiting for re-association
@@ -349,11 +342,9 @@ static ssize_t forced_ps_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* In case we're already in PSM, trigger it again to switch mode
* immediately without waiting for re-association
@@ -831,11 +822,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
wl->conf.rx_streaming.interval = value;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
@@ -889,11 +878,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
wl->conf.rx_streaming.always = value;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
@@ -939,11 +926,9 @@ static ssize_t beacon_filtering_write(struct file *file,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
@@ -1021,11 +1006,9 @@ static ssize_t sleep_auth_write(struct file *file,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_acx_sleep_auth(wl, value);
if (ret < 0)
@@ -1254,9 +1237,8 @@ static ssize_t fw_logger_write(struct file *file,
}
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
count = ret;
goto out;
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 5669f17b395f..6959efa4bfa9 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -141,11 +141,9 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (!wl->conf.rx_streaming.interval)
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
@@ -174,11 +172,9 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work)
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
@@ -223,11 +219,9 @@ static void wlcore_rc_update_work(struct work_struct *work)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (ieee80211_vif_is_mesh(vif)) {
ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
@@ -537,11 +531,9 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
while (!done && loopcount--) {
smp_mb__after_atomic();
@@ -838,11 +830,9 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* Do not send a stop fwlog command if the fw is hanged or if
* dbgpins are used (due to some fw bug).
*/
- error = pm_runtime_get_sync(wl->dev);
- if (error < 0) {
- pm_runtime_put_noidle(wl->dev);
+ error = pm_runtime_resume_and_get(wl->dev);
+ if (error < 0)
return;
- }
if (!wl->watchdog_recovery &&
wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
@@ -937,11 +927,9 @@ static void wl1271_recovery_work(struct work_struct *work)
if (wl->state == WLCORE_STATE_OFF || wl->plt)
goto out_unlock;
- error = pm_runtime_get_sync(wl->dev);
- if (error < 0) {
+ error = pm_runtime_resume_and_get(wl->dev);
+ if (error < 0)
wl1271_warning("Enable for recovery failed");
- pm_runtime_put_noidle(wl->dev);
- }
wlcore_disable_interrupts_nosync(wl);
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
@@ -1741,9 +1729,8 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
return ret;
}
@@ -1855,11 +1842,9 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -2060,11 +2045,9 @@ static void wlcore_channel_switch_work(struct work_struct *work)
vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_chswitch_done(vif, false);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_cmd_stop_channel_switch(wl, wlvif);
@@ -2131,11 +2114,9 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
if (!time_after(time_spare, wlvif->pending_auth_reply_time))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* cancel the ROC if active */
wlcore_update_inconn_sta(wl, wlvif, NULL, false);
@@ -2591,11 +2572,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
* Call runtime PM only after possible wl12xx_init_fw() above
* is done. Otherwise we do not have interrupts enabled.
*/
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_unlock;
- }
if (wl12xx_need_fw_change(wl, vif_count, true)) {
wl12xx_force_active_psm(wl);
@@ -2691,11 +2670,9 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto deinit;
- }
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
@@ -3129,11 +3106,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* configure each interface */
wl12xx_for_each_wlvif(wl, wlvif) {
@@ -3213,11 +3188,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -3470,11 +3443,9 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_wake_queues;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_wake_queues;
- }
ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
@@ -3622,11 +3593,9 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
goto out_unlock;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_unlock;
- }
wlvif->default_key = key_idx;
@@ -3659,11 +3628,9 @@ void wlcore_regdomain_config(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_cmd_regdomain_config_locked(wl);
if (ret < 0) {
@@ -3706,11 +3673,9 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* fail if there is any role in ROC */
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
@@ -3749,11 +3714,9 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
ret = wl->ops->scan_stop(wl, wlvif);
@@ -3800,11 +3763,9 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
@@ -3834,11 +3795,9 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl->ops->sched_scan_stop(wl, wlvif);
@@ -3862,11 +3821,9 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_acx_frag_threshold(wl, value);
if (ret < 0)
@@ -3894,11 +3851,9 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_rts_threshold(wl, wlvif, value);
@@ -4439,15 +4394,15 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
- u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
+ u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
/* save the supp_rates of the ap */
- sta_rate_set = sta->supp_rates[wlvif->band];
- if (sta->ht_cap.ht_supported)
+ sta_rate_set = sta->deflink.supp_rates[wlvif->band];
+ if (sta->deflink.ht_cap.ht_supported)
sta_rate_set |=
(rx_mask[0] << HW_HT_RATES_OFFSET) |
(rx_mask[1] << HW_MIMO_RATES_OFFSET);
- sta_ht_cap = sta->ht_cap;
+ sta_ht_cap = sta->deflink.ht_cap;
sta_exists = true;
}
@@ -4653,11 +4608,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if ((changed & BSS_CHANGED_TXPOWER) &&
bss_conf->txpower != wlvif->power_level) {
@@ -4714,11 +4667,9 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -4771,11 +4722,9 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wlvif->band = ctx->def.chan->band;
wlvif->channel = channel;
@@ -4823,11 +4772,9 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (wlvif->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
@@ -4893,11 +4840,9 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
for (i = 0; i < n_vifs; i++) {
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
@@ -4939,11 +4884,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/*
* the txop is confed in units of 32us by the mac80211,
@@ -4987,11 +4930,9 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
if (ret < 0)
@@ -5225,7 +5166,8 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
if (ret < 0)
return ret;
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
+ true,
wl_sta->hlid);
if (ret)
return ret;
@@ -5305,11 +5247,9 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
@@ -5363,11 +5303,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ba_bitmap = &wl->links[hlid].ba_bitmap;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
tid, action);
@@ -5475,11 +5413,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate =
@@ -5517,11 +5453,9 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* TODO: change mac80211 to pass vif as param */
@@ -5611,11 +5545,9 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
if (ret)
@@ -5666,11 +5598,9 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
if (ret < 0)
@@ -5723,11 +5653,9 @@ static int wlcore_roc_completed(struct wl1271 *wl)
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = __wlcore_roc_completed(wl);
@@ -5786,8 +5714,9 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
return;
/* this callback is atomic, so schedule a new work */
- wlvif->rc_update_bw = sta->bandwidth;
- memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
+ wlvif->rc_update_bw = sta->deflink.bandwidth;
+ memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
+ sizeof(sta->deflink.ht_cap));
ieee80211_queue_work(hw, &wlvif->rc_update_work);
}
@@ -5808,11 +5737,9 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_sleep;
- }
ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 29fa51c37e88..b414305acc32 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -53,11 +53,9 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.req = NULL;
wl->scan_wlvif = NULL;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 9140b0163474..cf8d909fa826 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -132,9 +132,8 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
- ret = pm_runtime_get_sync(&card->dev);
+ ret = pm_runtime_resume_and_get(&card->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&card->dev);
dev_err(glue->dev, "%s: failed to get_sync(%d)\n",
__func__, ret);
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index 35b535c125b6..f0c7e09b314d 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -56,11 +56,9 @@ static ssize_t bt_coex_state_store(struct device *dev,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl1271_acx_sg_enable(wl, wl->sg_enabled);
pm_runtime_mark_last_busy(wl->dev);
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 3a17b9a8207e..3f338b8096c7 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -83,11 +83,9 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
@@ -158,11 +156,9 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index e20e18cd04ae..7bd3ce2f0804 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -855,11 +855,9 @@ void wl1271_tx_work(struct work_struct *work)
int ret;
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index e1bd344c4ebc..e4269e2b0098 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -53,11 +53,9 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_smart_config_start(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
@@ -88,11 +86,9 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_smart_config_stop(wl);
@@ -135,11 +131,9 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_smart_config_set_group_key(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index 5b62cf3b3c42..fad642f9ffd8 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -33,6 +33,7 @@ static struct dentry *wwan_hwsim_debugfs_devcreate;
static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
static LIST_HEAD(wwan_hwsim_devs);
static unsigned int wwan_hwsim_dev_idx;
+static struct workqueue_struct *wwan_wq;
struct wwan_hwsim_dev {
struct list_head list;
@@ -371,7 +372,7 @@ static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
* waiting this callback to finish in the debugfs_remove() call. So,
* use workqueue.
*/
- schedule_work(&port->del_work);
+ queue_work(wwan_wq, &port->del_work);
return count;
}
@@ -416,7 +417,7 @@ static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
* waiting this callback to finish in the debugfs_remove() call. So,
* use workqueue.
*/
- schedule_work(&dev->del_work);
+ queue_work(wwan_wq, &dev->del_work);
return count;
}
@@ -506,9 +507,15 @@ static int __init wwan_hwsim_init(void)
if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
return -EINVAL;
+ wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+ if (!wwan_wq)
+ return -ENOMEM;
+
wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class))
- return PTR_ERR(wwan_hwsim_class);
+ if (IS_ERR(wwan_hwsim_class)) {
+ err = PTR_ERR(wwan_hwsim_class);
+ goto err_wq_destroy;
+ }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -523,9 +530,13 @@ static int __init wwan_hwsim_init(void)
return 0;
err_clean_devs:
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
+ flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+err_wq_destroy:
+ destroy_workqueue(wwan_wq);
return err;
}
@@ -534,9 +545,10 @@ static void __exit wwan_hwsim_exit(void)
{
debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
- flush_scheduled_work(); /* Wait deletion works completion */
+ flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+ destroy_workqueue(wwan_wq);
}
module_init(wwan_hwsim_init);
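The hwsim now queues deferred port/device deletion on a private workqueue, so both the init error path and module exit can flush exactly those works before destroying the class. A minimal module skeleton with the same teardown ordering (all names below are invented for illustration):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *sketch_wq;

static int __init sketch_init(void)
{
	sketch_wq = alloc_workqueue("sketch_wq", 0, 0);
	if (!sketch_wq)
		return -ENOMEM;
	/* ... create class, debugfs entries, devices ... */
	return 0;
}

static void __exit sketch_exit(void)
{
	/* 1. remove the debugfs "create" entry so no new devices appear  */
	/* 2. free existing devices; freeing may queue deletion works     */
	flush_workqueue(sketch_wq);	/* 3. wait for those works         */
	/* 4. remove remaining debugfs entries and destroy the class      */
	destroy_workqueue(sketch_wq);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");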
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fe8e21ad8ed9..8e035374a370 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -42,7 +42,6 @@
#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32
-#define XENVIF_NAPI_WEIGHT 64
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
@@ -739,7 +738,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
atomic_set(&queue->inflight_packets, 0);
netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
- XENVIF_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
queue->stalled = true;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index de25d7ac41da..1d195429753d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -801,8 +801,6 @@ struct qeth_priv {
u32 brport_features;
};
-#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
-
struct qeth_card {
enum qeth_card_states state;
spinlock_t lock;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d99c5b773e22..ae85179ca49a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -7100,7 +7100,7 @@ int qeth_open(struct net_device *dev)
local_bh_disable();
qeth_for_each_output_queue(card, queue, i) {
netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
- QETH_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
napi_enable(&queue->napi);
napi_schedule(&queue->napi);
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 303461d70af3..92698f79a4e0 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1133,7 +1133,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
- netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
+ netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT);
return register_netdev(card->dev);
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d2f422a9a4f7..ea3b6b18aa6e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1910,7 +1910,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
- netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
+ netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT);
return register_netdev(card->dev);
}
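The xen-netback and qeth hunks drop their private NAPI weight constants in favour of the core NAPI_POLL_WEIGHT, the default per-poll budget passed to netif_napi_add(). For illustration only (the poll handler and setup function below are assumed, not taken from either driver):

#include <linux/netdevice.h>

/* Hedged sketch: a poll handler driven by the default budget. */
static int sketch_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* done = process_rx(napi, budget); -- consume at most 'budget' packets */
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}

static void sketch_setup_napi(struct net_device *dev, struct napi_struct *napi)
{
	/* was a driver-private weight #define; now the core default */
	netif_napi_add(dev, napi, sketch_poll, NAPI_POLL_WEIGHT);
}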
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 932acb4e8cbc..fc274737053d 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -86,6 +86,5 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/qlge/Kconfig"
-source "drivers/staging/wfx/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 3ffb35ccfae2..65e317922e3f 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -33,4 +33,3 @@ obj-$(CONFIG_PI433) += pi433/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_QLGE) += qlge/
-obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
deleted file mode 100644
index 1b4bc2af94b6..000000000000
--- a/drivers/staging/wfx/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-This is a list of things that need to be done to get this driver out of the
-staging directory.
-
- - As suggested by Felix, rate control could be improved following this idea:
- https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/
-
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 7d9cfc730bd4..c29f39c01a9a 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -26,7 +26,7 @@ static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
/* shared constants to be used in various sysctls */
-const int sysctl_vals[] = { -1, 0, 1, 2, 4, 100, 200, 1000, 3000, INT_MAX, 65535 };
+const int sysctl_vals[] = { 0, 1, 2, 3, 4, 100, 200, 1000, 3000, INT_MAX, 65535, -1 };
EXPORT_SYMBOL(sysctl_vals);
const unsigned long sysctl_long_vals[] = { 0, 1, LONG_MAX };
@@ -1333,7 +1333,7 @@ struct ctl_table_header *__register_sysctl_table(
nr_entries++;
header = kzalloc(sizeof(struct ctl_table_header) +
- sizeof(struct ctl_node)*nr_entries, GFP_KERNEL);
+ sizeof(struct ctl_node)*nr_entries, GFP_KERNEL_ACCOUNT);
if (!header)
return NULL;
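The sysctl_vals reorder matters because the SYSCTL_* pointer macros in include/linux/sysctl.h index into this shared array, so their definitions move in lockstep elsewhere in the series; the kzalloc() change simply charges the header allocation to the caller's memcg via GFP_KERNEL_ACCOUNT. As a hedged reminder of how the shared constants are consumed (the table and backing flag below are invented):

#include <linux/sysctl.h>

/* Illustrative only: a ctl_table entry bounded by the shared constants.
 * SYSCTL_ZERO and SYSCTL_ONE are pointers into sysctl_vals[], which is why
 * the array order and the macro indices must stay in sync.
 */
static int example_flag;	/* hypothetical sysctl backing variable */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_flag",
		.data		= &example_flag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,	/* clamp to [0, 1] */
		.extra2		= SYSCTL_ONE,
	},
	{ }
};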
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 88a51b242adc..669d96d074ad 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -225,24 +225,20 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
- u32 __unused_flags; \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- NULL, \
- &__unused_flags); \
+ NULL, NULL); \
__ret; \
})
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
- u32 __unused_flags; \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- t_ctx, \
- &__unused_flags); \
+ t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bdb5298735ce..be94833d390a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>
+#include <linux/btf.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -147,14 +148,48 @@ struct bpf_map_ops {
bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);
- /* BTF name and id of struct allocated by map_alloc */
- const char * const map_btf_name;
+ /* BTF id of struct allocated by map_alloc */
int *map_btf_id;
/* bpf_iter info used to open a seq_file */
const struct bpf_iter_seq_info *iter_seq_info;
};
+enum {
+ /* Support at most 8 pointers in a BPF map value */
+ BPF_MAP_VALUE_OFF_MAX = 8,
+ BPF_MAP_OFF_ARR_MAX = BPF_MAP_VALUE_OFF_MAX +
+ 1 + /* for bpf_spin_lock */
+ 1, /* for bpf_timer */
+};
+
+enum bpf_kptr_type {
+ BPF_KPTR_UNREF,
+ BPF_KPTR_REF,
+};
+
+struct bpf_map_value_off_desc {
+ u32 offset;
+ enum bpf_kptr_type type;
+ struct {
+ struct btf *btf;
+ struct module *module;
+ btf_dtor_kfunc_t dtor;
+ u32 btf_id;
+ } kptr;
+};
+
+struct bpf_map_value_off {
+ u32 nr_off;
+ struct bpf_map_value_off_desc off[];
+};
+
+struct bpf_map_off_arr {
+ u32 cnt;
+ u32 field_off[BPF_MAP_OFF_ARR_MAX];
+ u8 field_sz[BPF_MAP_OFF_ARR_MAX];
+};
+
struct bpf_map {
/* The first two cachelines with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
@@ -171,6 +206,7 @@ struct bpf_map {
u64 map_extra; /* any per-map-type extra fields */
u32 map_flags;
int spin_lock_off; /* >=0 valid offset, <0 error */
+ struct bpf_map_value_off *kptr_off_tab;
int timer_off; /* >=0 valid offset, <0 error */
u32 id;
int numa_node;
@@ -182,10 +218,7 @@ struct bpf_map {
struct mem_cgroup *memcg;
#endif
char name[BPF_OBJ_NAME_LEN];
- bool bypass_spec_v1;
- bool frozen; /* write-once; write-protected by freeze_mutex */
- /* 14 bytes hole */
-
+ struct bpf_map_off_arr *off_arr;
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
@@ -205,6 +238,8 @@ struct bpf_map {
bool jited;
bool xdp_has_frags;
} owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
};
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -217,43 +252,44 @@ static inline bool map_value_has_timer(const struct bpf_map *map)
return map->timer_off >= 0;
}
+static inline bool map_value_has_kptrs(const struct bpf_map *map)
+{
+ return !IS_ERR_OR_NULL(map->kptr_off_tab);
+}
+
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
if (unlikely(map_value_has_spin_lock(map)))
memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
if (unlikely(map_value_has_timer(map)))
memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
+ if (unlikely(map_value_has_kptrs(map))) {
+ struct bpf_map_value_off *tab = map->kptr_off_tab;
+ int i;
+
+ for (i = 0; i < tab->nr_off; i++)
+ *(u64 *)(dst + tab->off[i].offset) = 0;
+ }
}
/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
- u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;
+ u32 curr_off = 0;
+ int i;
- if (unlikely(map_value_has_spin_lock(map))) {
- s_off = map->spin_lock_off;
- s_sz = sizeof(struct bpf_spin_lock);
- }
- if (unlikely(map_value_has_timer(map))) {
- t_off = map->timer_off;
- t_sz = sizeof(struct bpf_timer);
+ if (likely(!map->off_arr)) {
+ memcpy(dst, src, map->value_size);
+ return;
}
- if (unlikely(s_sz || t_sz)) {
- if (s_off < t_off || !s_sz) {
- swap(s_off, t_off);
- swap(s_sz, t_sz);
- }
- memcpy(dst, src, t_off);
- memcpy(dst + t_off + t_sz,
- src + t_off + t_sz,
- s_off - t_off - t_sz);
- memcpy(dst + s_off + s_sz,
- src + s_off + s_sz,
- map->value_size - s_off - s_sz);
- } else {
- memcpy(dst, src, map->value_size);
+ for (i = 0; i < map->off_arr->cnt; i++) {
+ u32 next_off = map->off_arr->field_off[i];
+
+ memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
+ curr_off = next_off + map->off_arr->field_sz[i];
}
+ memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
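With off_arr, copy_map_value() walks a sorted list of special fields (spin_lock, timer, and up to eight kptrs) and copies only the gaps between them, replacing the old hard-coded two-field logic. A standalone worked example of that traversal (offsets are invented; the sketch resumes copying immediately after each skipped field):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the traversal: field_off[] holds the offsets of fields that must
 * NOT be copied, field_sz[] their sizes, both sorted by offset.
 */
struct off_arr {
	uint32_t cnt;
	uint32_t field_off[10];
	uint8_t  field_sz[10];
};

static void copy_value(const struct off_arr *oa, void *dst, const void *src,
		       uint32_t value_size)
{
	uint32_t curr = 0;
	uint32_t i;

	for (i = 0; i < oa->cnt; i++) {
		uint32_t next = oa->field_off[i];

		memcpy((char *)dst + curr, (const char *)src + curr, next - curr);
		curr = next + oa->field_sz[i];	/* skip over the special field */
	}
	memcpy((char *)dst + curr, (const char *)src + curr, value_size - curr);
}

int main(void)
{
	/* invented layout: spin_lock at offset 8 (4 bytes), timer at 24 (16 bytes) */
	struct off_arr oa = { .cnt = 2, .field_off = { 8, 24 }, .field_sz = { 4, 16 } };
	char src[48], dst[48] = { 0 };

	memset(src, 0xab, sizeof(src));
	copy_value(&oa, dst, src, sizeof(src));
	/* bytes 8..11 and 24..39 stay zero in dst; everything else is copied */
	printf("dst[8]=%d dst[12]=%d\n", dst[8], (unsigned char)dst[12]);
	return 0;
}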
@@ -342,7 +378,18 @@ enum bpf_type_flag {
*/
MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
- __BPF_TYPE_LAST_FLAG = MEM_PERCPU,
+ /* Indicates that the argument will be released. */
+ OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS),
+
+ /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
+ * unreferenced and referenced kptr loaded from map value using a load
+ * instruction, so that they can only be dereferenced but not escape the
+ * BPF program into the kernel (i.e. cannot be passed as arguments to
+ * kfunc or bpf helpers).
+ */
+ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_LAST_FLAG = PTR_UNTRUSTED,
};
/* Max number of base types. */
@@ -391,6 +438,7 @@ enum bpf_arg_type {
ARG_PTR_TO_STACK, /* pointer to stack */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
+ ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
__BPF_ARG_TYPE_MAX,
/* Extended arg_types. */
@@ -400,6 +448,7 @@ enum bpf_arg_type {
ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
+ ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
/* This must be the last entry. Its purpose is to ensure the enum is
* wide enough to hold the higher bits reserved for bpf_type_flag.
@@ -1221,7 +1270,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_lock.
*
* Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
+ * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
*
* the structure returned by bpf_prog_array_alloc() should be populated
* with program pointers and the last pointer must be NULL.
@@ -1315,83 +1364,22 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
-static __always_inline int
-BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
- const void *ctx, bpf_prog_run_fn run_prog,
- int retval, u32 *ret_flags)
-{
- const struct bpf_prog_array_item *item;
- const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
- struct bpf_run_ctx *old_run_ctx;
- struct bpf_cg_run_ctx run_ctx;
- u32 func_ret;
-
- run_ctx.retval = retval;
- migrate_disable();
- rcu_read_lock();
- array = rcu_dereference(array_rcu);
- item = &array->items[0];
- old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- while ((prog = READ_ONCE(item->prog))) {
- run_ctx.prog_item = item;
- func_ret = run_prog(prog, ctx);
- if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
- run_ctx.retval = -EPERM;
- *(ret_flags) |= (func_ret >> 1);
- item++;
- }
- bpf_reset_run_ctx(old_run_ctx);
- rcu_read_unlock();
- migrate_enable();
- return run_ctx.retval;
-}
-
-static __always_inline int
-BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
- const void *ctx, bpf_prog_run_fn run_prog,
- int retval)
-{
- const struct bpf_prog_array_item *item;
- const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
- struct bpf_run_ctx *old_run_ctx;
- struct bpf_cg_run_ctx run_ctx;
-
- run_ctx.retval = retval;
- migrate_disable();
- rcu_read_lock();
- array = rcu_dereference(array_rcu);
- item = &array->items[0];
- old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- while ((prog = READ_ONCE(item->prog))) {
- run_ctx.prog_item = item;
- if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
- run_ctx.retval = -EPERM;
- item++;
- }
- bpf_reset_run_ctx(old_run_ctx);
- rcu_read_unlock();
- migrate_enable();
- return run_ctx.retval;
-}
-
static __always_inline u32
-BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
+bpf_prog_run_array(const struct bpf_prog_array *array,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
u32 ret = 1;
- migrate_disable();
- rcu_read_lock();
- array = rcu_dereference(array_rcu);
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
if (unlikely(!array))
- goto out;
+ return ret;
+
+ migrate_disable();
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
@@ -1400,50 +1388,10 @@ BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
item++;
}
bpf_reset_run_ctx(old_run_ctx);
-out:
- rcu_read_unlock();
migrate_enable();
return ret;
}
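With the __rcu annotation dropped from the parameter, the rcu_dereference() and the RCU read-side critical section now belong to the caller, which the RCU_LOCKDEP_WARN() above enforces. A hedged sketch of the new calling convention (progs is a hypothetical struct bpf_prog_array __rcu * field):

	u32 ret;

	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(progs), ctx, bpf_prog_run);
	rcu_read_unlock();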
-/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
- * so BPF programs can request cwr for TCP packets.
- *
- * Current cgroup skb programs can only return 0 or 1 (0 to drop the
- * packet. This macro changes the behavior so the low order bit
- * indicates whether the packet should be dropped (0) or not (1)
- * and the next bit is a congestion notification bit. This could be
- * used by TCP to call tcp_enter_cwr()
- *
- * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
- * 0: drop packet
- * 1: keep packet
- * 2: drop packet and cn
- * 3: keep packet and cn
- *
- * This macro then converts it to one of the NET_XMIT or an error
- * code that is then interpreted as drop packet (and no cn):
- * 0: NET_XMIT_SUCCESS skb should be transmitted
- * 1: NET_XMIT_DROP skb should be dropped and cn
- * 2: NET_XMIT_CN skb should be transmitted and cn
- * 3: -err skb should be dropped
- */
-#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
- ({ \
- u32 _flags = 0; \
- bool _cn; \
- u32 _ret; \
- _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \
- _cn = _flags & BPF_RET_SET_CN; \
- if (_ret && !IS_ERR_VALUE((long)_ret)) \
- _ret = -EFAULT; \
- if (!_ret) \
- _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
- else \
- _ret = (_cn ? NET_XMIT_DROP : _ret); \
- _ret; \
- })
-
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@@ -1497,6 +1445,12 @@ void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset);
+void bpf_map_free_kptr_off_tab(struct bpf_map *map);
+struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map);
+bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b);
+void bpf_map_free_kptrs(struct bpf_map *map, void *map_value);
+
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
@@ -1793,7 +1747,8 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
u32 *next_btf_id, enum bpf_type_flag *flag);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
const struct btf *btf, u32 id, int off,
- const struct btf *need_btf, u32 need_type_id);
+ const struct btf *need_btf, u32 need_type_id,
+ bool strict);
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 493e63258497..7ea18d4da84b 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -143,9 +143,9 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem,
- bool uncharge_omem);
+ bool uncharge_omem, bool use_trace_rcu);
-void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
struct bpf_local_storage_elem *selem);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 3a9d2d7cc6b7..1f1e7f2ea967 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -523,8 +523,7 @@ int check_ptr_off_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
- enum bpf_arg_type arg_type,
- bool is_release_func);
+ enum bpf_arg_type arg_type);
int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 36bc09b8e890..2611cea2c2b6 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -17,6 +17,7 @@ enum btf_kfunc_type {
BTF_KFUNC_TYPE_ACQUIRE,
BTF_KFUNC_TYPE_RELEASE,
BTF_KFUNC_TYPE_RET_NULL,
+ BTF_KFUNC_TYPE_KPTR_ACQUIRE,
BTF_KFUNC_TYPE_MAX,
};
@@ -35,11 +36,19 @@ struct btf_kfunc_id_set {
struct btf_id_set *acquire_set;
struct btf_id_set *release_set;
struct btf_id_set *ret_null_set;
+ struct btf_id_set *kptr_acquire_set;
};
struct btf_id_set *sets[BTF_KFUNC_TYPE_MAX];
};
};
+struct btf_id_dtor_kfunc {
+ u32 btf_id;
+ u32 kfunc_btf_id;
+};
+
+typedef void (*btf_dtor_kfunc_t)(void *);
+
extern const struct file_operations btf_fops;
void btf_get(struct btf *btf);
@@ -123,6 +132,8 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
int btf_find_timer(const struct btf *btf, const struct btf_type *t);
+struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf,
+ const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
@@ -344,6 +355,9 @@ bool btf_kfunc_id_set_contains(const struct btf *btf,
enum btf_kfunc_type type, u32 kfunc_btf_id);
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
const struct btf_kfunc_id_set *s);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
+int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
+ struct module *owner);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
@@ -367,6 +381,15 @@ static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
{
return 0;
}
+static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+{
+ return -ENOENT;
+}
+static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors,
+ u32 add_cnt, struct module *owner)
+{
+ return 0;
+}
#endif
#endif
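register_btf_id_dtor_kfuncs() associates a BTF type with the kfunc that acts as its destructor, so kernel code can release a referenced kptr that is still stored in a map value when the map is freed. A hedged sketch of how a module might register one, using the existing BTF_ID_LIST()/BTF_ID() machinery; the type and function names are invented:

	#include <linux/btf.h>
	#include <linux/btf_ids.h>
	#include <linux/module.h>

	BTF_ID_LIST(my_dtor_ids)
	BTF_ID(struct, my_obj)
	BTF_ID(func, my_obj_release)

	static const struct btf_id_dtor_kfunc my_dtors[] = {
		{
			.btf_id	      = my_dtor_ids[0],	/* struct my_obj */
			.kfunc_btf_id = my_dtor_ids[1],	/* my_obj_release() */
		},
	};

	static int __init my_mod_init(void)
	{
		return register_btf_id_dtor_kfuncs(my_dtors, ARRAY_SIZE(my_dtors),
						   THIS_MODULE);
	}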
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index c11477620403..c205c51d79c9 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -42,8 +42,8 @@ int can_rx_offload_add_manual(struct net_device *dev,
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
- struct sk_buff *skb, u32 timestamp);
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp,
unsigned int *frame_len_ptr);
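The rename from can_rx_offload_queue_sorted() to can_rx_offload_queue_timestamp() is mechanical; arguments and semantics are unchanged. A hedged driver-side sketch (priv, skb and timestamp come from a hypothetical IRQ handler):

	int err;

	/* queue the skb for timestamp-ordered delivery through NAPI */
	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
	if (err)
		netdev->stats.rx_fifo_errors++;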
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 4af58459a1e7..99dc7bfbcd3c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -71,11 +71,13 @@ enum {
* struct kernel_ethtool_ringparam - RX/TX ring configuration
* @rx_buf_len: Current length of buffers on the rx ring.
* @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: Whether TX push mode is enabled
* @cqe_size: Size of TX/RX completion queue event
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
u8 tcp_data_split;
+ u8 tx_push;
u32 cqe_size;
};
@@ -83,10 +85,12 @@ struct kernel_ethtool_ringparam {
* enum ethtool_supported_ring_param - indicator caps for setting ring params
* @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
* @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
+ ETHTOOL_RING_USE_TX_PUSH = BIT(2),
};
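A driver opts in to the new knob by advertising ETHTOOL_RING_USE_TX_PUSH in its ethtool_ops and then honouring the value handed in through struct kernel_ethtool_ringparam. A hedged sketch; the private struct and the ops wiring are hypothetical:

	struct my_priv {
		bool tx_push_enabled;
	};

	static int my_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ring,
				    struct kernel_ethtool_ringparam *kernel_ring,
				    struct netlink_ext_ack *extack)
	{
		struct my_priv *priv = netdev_priv(dev);

		priv->tx_push_enabled = !!kernel_ring->tx_push;
		return 0;
	}

	static const struct ethtool_ops my_ethtool_ops = {
		.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH,
		.set_ringparam		= my_set_ringparam,
	};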
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 9055cb380ee2..db0f4fcfdaf4 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -79,8 +79,9 @@ extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
-extern void icmpv6_param_prob(struct sk_buff *skb,
- u8 code, int pos);
+extern void icmpv6_param_prob_reason(struct sk_buff *skb,
+ u8 code, int pos,
+ enum skb_drop_reason reason);
struct flowi6;
struct in6_addr;
@@ -91,6 +92,12 @@ extern void icmpv6_flow_init(struct sock *sk,
const struct in6_addr *daddr,
int oif);
+static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+{
+ icmpv6_param_prob_reason(skb, code, pos,
+ SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
static inline bool icmpv6_is_err(int type)
{
switch (type) {
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 95c831162212..f1f9412b6ac6 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -134,18 +134,46 @@ enum {
* a successful transmission.
*/
IEEE802154_SUCCESS = 0x0,
-
+ /* The requested operation failed. */
+ IEEE802154_MAC_ERROR = 0x1,
+ /* The requested operation has been cancelled. */
+ IEEE802154_CANCELLED = 0x2,
+ /*
+ * Device is ready to poll the coordinator for data in a non-beacon-enabled
+ * PAN.
+ */
+ IEEE802154_READY_FOR_POLL = 0x3,
+ /* Wrong frame counter. */
+ IEEE802154_COUNTER_ERROR = 0xdb,
+ /*
+ * The frame does not conform to the incoming key usage policy checking
+ * procedure.
+ */
+ IEEE802154_IMPROPER_KEY_TYPE = 0xdc,
+ /*
+ * The frame does not conform to the incoming security level usage
+ * policy checking procedure.
+ */
+ IEEE802154_IMPROPER_SECURITY_LEVEL = 0xdd,
+ /* Secured frame received with an empty Frame Version field. */
+ IEEE802154_UNSUPPORTED_LEGACY = 0xde,
+ /*
+ * A secured frame is received or must be sent but security is not
+ * enabled in the device. Or, the Auxiliary Security Header has security
+ * level of zero in it.
+ */
+ IEEE802154_UNSUPPORTED_SECURITY = 0xdf,
/* The beacon was lost following a synchronization request. */
- IEEE802154_BEACON_LOSS = 0xe0,
+ IEEE802154_BEACON_LOST = 0xe0,
/*
* A transmission could not take place due to activity on the
* channel, i.e., the CSMA-CA mechanism has failed.
*/
- IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
+ IEEE802154_CHANNEL_ACCESS_FAILURE = 0xe1,
/* The GTS request has been denied by the PAN coordinator. */
- IEEE802154_DENINED = 0xe2,
+ IEEE802154_DENIED = 0xe2,
/* The attempt to disable the transceiver has failed. */
- IEEE802154_DISABLE_TRX_FAIL = 0xe3,
+ IEEE802154_DISABLE_TRX_FAILURE = 0xe3,
/*
* The received frame induces a failed security check according to
* the security suite.
@@ -185,9 +213,9 @@ enum {
* A PAN identifier conflict has been detected and communicated to the
* PAN coordinator.
*/
- IEEE802154_PANID_CONFLICT = 0xee,
+ IEEE802154_PAN_ID_CONFLICT = 0xee,
/* A coordinator realignment command has been received. */
- IEEE802154_REALIGMENT = 0xef,
+ IEEE802154_REALIGNMENT = 0xef,
/* The transaction has expired and its information discarded. */
IEEE802154_TRANSACTION_EXPIRED = 0xf0,
/* There is no capacity to store the transaction. */
@@ -203,12 +231,49 @@ enum {
* A SET/GET request was issued with the identifier of a PIB attribute
* that is not supported.
*/
- IEEE802154_UNSUPPORTED_ATTR = 0xf4,
+ IEEE802154_UNSUPPORTED_ATTRIBUTE = 0xf4,
+ /* Missing source or destination address or address mode. */
+ IEEE802154_INVALID_ADDRESS = 0xf5,
+ /*
+ * MLME asked to turn the receiver on, but the on time duration is too
+ * big compared to the macBeaconOrder.
+ */
+ IEEE802154_ON_TIME_TOO_LONG = 0xf6,
+ /*
+ * MLME asked to turn the receiver on, but the request was delayed for
+ * too long before getting processed.
+ */
+ IEEE802154_PAST_TIME = 0xf7,
+ /*
+ * The StartTime parameter is nonzero, and the MLME is not currently
+ * tracking the beacon of the coordinator through which it is
+ * associated.
+ */
+ IEEE802154_TRACKING_OFF = 0xf8,
+ /*
+ * The index inside the hierarchical values in PIBAttribute is out of
+ * range.
+ */
+ IEEE802154_INVALID_INDEX = 0xf9,
+ /*
+ * The maximum number of PAN descriptors that can be stored during a
+ * scan has been reached.
+ */
+ IEEE802154_LIMIT_REACHED = 0xfa,
+ /*
+ * The PIBAttribute parameter specifies an attribute that is a read-only
+ * attribute.
+ */
+ IEEE802154_READ_ONLY = 0xfb,
/*
* A request to perform a scan operation failed because the MLME was
* in the process of performing a previously initiated scan operation.
*/
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
+ /* The outgoing superframe overlaps the incoming superframe. */
+ IEEE802154_SUPERFRAME_OVERLAP = 0xfd,
+ /* Any other error situation. */
+ IEEE802154_SYSTEM_ERROR = 0xff,
};
/* frame control handling */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 16870f86c74d..ec5ca392eaa3 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -61,6 +61,7 @@ struct ipv6_devconf {
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
+ __s32 accept_unsolicited_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
@@ -339,8 +340,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
return (struct raw6_sock *)sk;
}
-#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
-#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) (sk->sk_ipv6only)
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
inet6_sk(sk)->rxopt.bits.rxinfo)
@@ -357,7 +357,6 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
return ipv6_only_sock(sk);
}
#else
-#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ecac96d52e01..00177567cfef 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -340,6 +340,76 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
}
+/**
+ * mii_t1_adv_l_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [15:0] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM);
+}
+
+/**
+ * mii_t1_adv_m_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [31:16] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_l_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [15:0] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_m_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [31:16] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_B10L;
+
+ return result;
+}
+
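The four helpers convert in both directions between linkmode bits and the two BASE-T1 advertisement registers. A hedged round-trip sketch in PHY-driver context:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = { 0, };
	u32 reg;

	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, adv);
	linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, adv);

	reg = linkmode_adv_to_mii_t1_adv_l_t(adv);  /* MDIO_AN_T1_ADV_L_PAUSE_CAP */
	mii_t1_adv_l_mod_linkmode_t(adv, reg);      /* Pause bit survives the round trip */

	reg = linkmode_adv_to_mii_t1_adv_m_t(adv);  /* MDIO_AN_T1_ADV_M_B10L */
	mii_t1_adv_m_mod_linkmode_t(adv, reg);      /* 10BASE-T1L bit survives as well */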
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h
deleted file mode 100644
index dacf69516002..000000000000
--- a/include/linux/mlx5/accel.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_H__
-#define __MLX5_ACCEL_H__
-
-#include <linux/mlx5/driver.h>
-
-enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
-};
-
-enum mlx5_accel_esp_flags {
- MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
- MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
- MLX5_ACCEL_ESP_ACTION_DECRYPT,
- MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
-enum mlx5_accel_esp_keymats {
- MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
- MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
-};
-
-enum mlx5_accel_esp_replay {
- MLX5_ACCEL_ESP_REPLAY_NONE,
- MLX5_ACCEL_ESP_REPLAY_BMP,
-};
-
-struct aes_gcm_keymat {
- u64 seq_iv;
- enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
-
- u32 salt;
- u32 icv_len;
-
- u32 key_len;
- u32 aes_key[256 / 32];
-};
-
-struct mlx5_accel_esp_xfrm_attrs {
- enum mlx5_accel_esp_action action;
- u32 esn;
- __be32 spi;
- u32 seq;
- u32 tfc_pad;
- u32 flags;
- u32 sa_handle;
- enum mlx5_accel_esp_replay replay_type;
- union {
- struct {
- u32 size;
-
- } bmp;
- } replay;
- enum mlx5_accel_esp_keymats keymat_type;
- union {
- struct aes_gcm_keymat aes_gcm;
- } keymat;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
- u8 is_ipv6;
-};
-
-struct mlx5_accel_esp_xfrm {
- struct mlx5_core_dev *mdev;
- struct mlx5_accel_esp_xfrm_attrs attrs;
-};
-
-enum {
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
-};
-
-enum mlx5_accel_ipsec_cap {
- MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
- MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
- MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
- MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
- MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
- MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
- MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
-};
-
-#ifdef CONFIG_MLX5_ACCEL
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
-
-#else
-
-static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-
-static inline struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
-static inline void
-mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
-static inline int
-mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
-
-#endif /* CONFIG_MLX5_ACCEL */
-#endif /* __MLX5_ACCEL_H__ */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9424503eb8d3..ff47d49d8be4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -778,9 +778,6 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_MLX5_ACCEL
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
-#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index e3bfed68b08a..8135713b0d2d 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -40,6 +40,18 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_NONE,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ MLX5_FLOW_DESTINATION_TYPE_TIR,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+ MLX5_FLOW_DESTINATION_TYPE_PORT,
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+};
+
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 7d2d0ba82144..7bab3e51c61e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1806,16 +1806,12 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_c0[0x740];
};
-enum mlx5_flow_destination_type {
- MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
- MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
-
- MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
- MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
+enum mlx5_ifc_flow_destination_type {
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
};
enum mlx5_flow_table_miss_action {
@@ -11383,8 +11379,6 @@ enum {
enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
- MLX5_IPSEC_OBJECT_ICV_LEN_12B,
- MLX5_IPSEC_OBJECT_ICV_LEN_8B,
};
struct mlx5_ifc_ipsec_obj_bits {
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 07d77323f78a..45c7c0d67635 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -54,7 +54,6 @@ enum {
enum {
MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
};
struct mlx5_ifc_fpga_shell_caps_bits {
@@ -387,89 +386,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_tls_extended_cap_bits {
- u8 aes_gcm_128[0x1];
- u8 aes_gcm_256[0x1];
- u8 reserved_at_2[0x1e];
- u8 reserved_at_20[0x20];
- u8 context_capacity_total[0x20];
- u8 context_capacity_rx[0x20];
- u8 context_capacity_tx[0x20];
- u8 reserved_at_a0[0x10];
- u8 tls_counter_size[0x10];
- u8 tls_counters_addr_low[0x20];
- u8 tls_counters_addr_high[0x20];
- u8 rx[0x1];
- u8 tx[0x1];
- u8 tls_v12[0x1];
- u8 tls_v13[0x1];
- u8 lro[0x1];
- u8 ipv6[0x1];
- u8 reserved_at_106[0x1a];
-};
-
-struct mlx5_ifc_ipsec_extended_cap_bits {
- u8 encapsulation[0x20];
-
- u8 reserved_0[0x12];
- u8 v2_command[0x1];
- u8 udp_encap[0x1];
- u8 rx_no_trailer[0x1];
- u8 ipv4_fragment[0x1];
- u8 ipv6[0x1];
- u8 esn[0x1];
- u8 lso[0x1];
- u8 transport_and_tunnel_mode[0x1];
- u8 tunnel_mode[0x1];
- u8 transport_mode[0x1];
- u8 ah_esp[0x1];
- u8 esp[0x1];
- u8 ah[0x1];
- u8 ipv4_options[0x1];
-
- u8 auth_alg[0x20];
-
- u8 enc_alg[0x20];
-
- u8 sa_cap[0x20];
-
- u8 reserved_1[0x10];
- u8 number_of_ipsec_counters[0x10];
-
- u8 ipsec_counters_addr_low[0x20];
- u8 ipsec_counters_addr_high[0x20];
-};
-
-struct mlx5_ifc_ipsec_counters_bits {
- u8 dec_in_packets[0x40];
-
- u8 dec_out_packets[0x40];
-
- u8 dec_bypass_packets[0x40];
-
- u8 enc_in_packets[0x40];
-
- u8 enc_out_packets[0x40];
-
- u8 enc_bypass_packets[0x40];
-
- u8 drop_dec_packets[0x40];
-
- u8 failed_auth_dec_packets[0x40];
-
- u8 drop_enc_packets[0x40];
-
- u8 success_add_sa[0x40];
-
- u8 fail_add_sa[0x40];
-
- u8 success_delete_sa[0x40];
-
- u8 fail_delete_sa[0x40];
-
- u8 dropped_cmd[0x40];
-};
-
enum {
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
@@ -486,131 +402,4 @@ struct mlx5_ifc_fpga_qp_error_event_bits {
u8 reserved_at_c0[0x8];
u8 fpga_qpn[0x18];
};
-enum mlx5_ifc_fpga_ipsec_response_syndrome {
- MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
- MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
- MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
- MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_resp {
- __be32 syndrome;
- union {
- __be32 sw_sa_handle;
- __be32 flags;
- };
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_cmd_opcode {
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
- MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
-};
-
-enum mlx5_ifc_fpga_ipsec_cap {
- MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_cap {
- __be32 cmd;
- __be32 flags;
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_sa_flags {
- MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
- MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
- MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
- MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
- MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
- MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
- MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
-};
-
-enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
- MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_sa_v1 {
- __be32 cmd;
- u8 key_enc[32];
- u8 key_auth[32];
- __be32 sip[4];
- __be32 dip[4];
- union {
- struct {
- __be32 reserved;
- u8 salt_iv[8];
- __be32 salt;
- } __packed gcm;
- struct {
- u8 salt[16];
- } __packed cbc;
- };
- __be32 spi;
- __be32 sw_sa_handle;
- __be16 tfclen;
- u8 enc_mode;
- u8 reserved1[2];
- u8 flags;
- u8 reserved2[2];
-};
-
-struct mlx5_ifc_fpga_ipsec_sa {
- struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
- __be16 udp_sp;
- __be16 udp_dp;
- u8 reserved1[4];
- __be32 esn;
- __be16 vid; /* only 12 bits, rest is reserved */
- __be16 reserved2;
-} __packed;
-
-enum fpga_tls_cmds {
- CMD_SETUP_STREAM = 0x1001,
- CMD_TEARDOWN_STREAM = 0x1002,
- CMD_RESYNC_RX = 0x1003,
-};
-
-#define MLX5_TLS_1_2 (0)
-
-#define MLX5_TLS_ALG_AES_GCM_128 (0)
-#define MLX5_TLS_ALG_AES_GCM_256 (1)
-
-struct mlx5_ifc_tls_cmd_bits {
- u8 command_type[0x20];
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 tls_version[0x2];
- u8 reserved[0x1c];
- u8 swid[0x20];
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 tls_rcd_sn[0x40];
- u8 tcp_sn[0x20];
- u8 tls_implicit_iv[0x20];
- u8 tls_xor_iv[0x40];
- u8 encryption_key[0x100];
- u8 alg[4];
- u8 reserved2[0x1c];
- u8 reserved3[0x4a0];
-};
-
-struct mlx5_ifc_tls_resp_bits {
- u8 syndrome[0x20];
- u8 stream_id[0x20];
- u8 reserved[0x40];
-};
-
-#define MLX5_TLS_COMMAND_SIZE (0x100)
-
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 28a928b0684b..e96ee1e348cb 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -141,7 +141,7 @@ enum mlx5_ptys_width {
MLX5_PTYS_WIDTH_12X = 1 << 4,
};
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
MLX5_GET(reg, out, field))
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b1fbe21650bb..eaf66e57d891 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -59,7 +59,8 @@ struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;
-
+struct netdev_name_node;
+struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
@@ -202,6 +203,7 @@ struct net_device_core_stats {
unsigned long rx_dropped;
unsigned long tx_dropped;
unsigned long rx_nohandler;
+ unsigned long rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));
#include <linux/cache.h>
@@ -862,6 +864,7 @@ enum net_device_path_type {
DEV_PATH_BRIDGE,
DEV_PATH_PPPOE,
DEV_PATH_DSA,
+ DEV_PATH_MTK_WDMA,
};
struct net_device_path {
@@ -887,6 +890,12 @@ struct net_device_path {
int port;
u16 proto;
} dsa;
+ struct {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+ } mtk_wdma;
};
};
@@ -1013,16 +1022,6 @@ struct dev_ifalias {
struct devlink;
struct tlsdev_ops;
-struct netdev_name_node {
- struct hlist_node hlist;
- struct list_head list;
- struct net_device *dev;
- const char *name;
-};
-
-int netdev_name_node_alt_create(struct net_device *dev, const char *name);
-int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
-
struct netdev_net_notifier {
struct list_head list;
struct notifier_block *nb;
@@ -1261,6 +1260,10 @@ struct netdev_net_notifier {
* struct net_device *dev,
* const unsigned char *addr, u16 vid)
* Deletes the FDB entry from dev corresponding to addr.
+ * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[],
+ * struct net_device *dev,
+ * u16 vid,
+ * struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
* int *idx)
@@ -1511,6 +1514,11 @@ struct net_device_ops {
struct net_device *dev,
const unsigned char *addr,
u16 vid);
+ int (*ndo_fdb_del_bulk)(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ u16 vid,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -2491,37 +2499,53 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define NAPI_POLL_WEIGHT 64
+void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight);
+
/**
- * netif_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add() - initialize a NAPI context
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
+ * @weight: default weight
*
* netif_napi_add() must be used to initialize a NAPI context prior to calling
* *any* of the other NAPI-related functions.
*/
-void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+static inline void
+netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ netif_napi_add_weight(dev, napi, poll, weight);
+}
+
+static inline void
+netif_napi_add_tx_weight(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+ netif_napi_add_weight(dev, napi, poll, weight);
+}
+
+#define netif_tx_napi_add netif_napi_add_tx_weight
/**
- * netif_tx_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* This variant of netif_napi_add() should be used from drivers using NAPI
* to exclusively poll a TX queue.
* This will avoid adding it into napi_hash[], thus polluting this hash table.
*/
-static inline void netif_tx_napi_add(struct net_device *dev,
+static inline void netif_napi_add_tx(struct net_device *dev,
struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int),
- int weight)
+ int (*poll)(struct napi_struct *, int))
{
- set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
- netif_napi_add(dev, napi, poll, weight);
+ netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
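For drivers the net effect is that netif_napi_add() keeps its current signature (now as an inline wrapper), while TX-only contexts can drop the explicit weight. A hedged probe-path sketch; the private struct and the poll callbacks are hypothetical:

	netif_napi_add(netdev, &priv->rx_napi, my_rx_poll, NAPI_POLL_WEIGHT);
	netif_napi_add_tx(netdev, &priv->tx_napi, my_tx_poll);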
/**
@@ -2932,10 +2956,20 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL);
+}
+
+static inline int dev_queue_xmit_accel(struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return __dev_queue_xmit(skb, sb_dev);
+}
+
static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
int ret;
@@ -2968,7 +3002,6 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
@@ -3027,19 +3060,6 @@ static inline bool dev_has_header(const struct net_device *dev)
return dev->header_ops && dev->header_ops->create;
}
-#ifdef CONFIG_NET_FLOW_LIMIT
-#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
-struct sd_flow_limit {
- u64 count;
- unsigned int num_buckets;
- unsigned int history_head;
- u16 history[FLOW_LIMIT_HISTORY];
- u8 buckets[];
-};
-
-extern int netdev_flow_limit_table_len;
-#endif /* CONFIG_NET_FLOW_LIMIT */
-
/*
* Incoming packets are placed on per-CPU queues
*/
@@ -3067,6 +3087,9 @@ struct softnet_data {
struct {
u16 recursion;
u8 more;
+#ifdef CONFIG_NET_EGRESS
+ u8 skip_txqueue;
+#endif
} xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
@@ -3084,6 +3107,11 @@ struct softnet_data {
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
+ /* Another possibly contended cache line */
+ spinlock_t defer_lock ____cacheline_aligned_in_smp;
+ int defer_count;
+ struct sk_buff *defer_list;
+ call_single_data_t defer_csd;
};
static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -3763,7 +3791,6 @@ int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
unsigned int gchanges);
-int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
@@ -3775,13 +3802,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net,
return __dev_change_net_namespace(dev, net, pat, 0);
}
int __dev_set_mtu(struct net_device *, int);
-int dev_validate_mtu(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
-int dev_set_mtu_ext(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
-int dev_change_tx_queue_len(struct net_device *, unsigned long);
-void dev_set_group(struct net_device *, int);
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
@@ -3789,24 +3810,13 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack);
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
-int dev_change_carrier(struct net_device *, bool new_carrier);
-int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid);
-int dev_get_phys_port_name(struct net_device *dev,
- char *name, size_t len);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-int dev_change_proto_down(struct net_device *dev, bool proto_down);
-void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
- u32 value);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
-typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
-int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
- int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
@@ -3868,6 +3878,7 @@ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)
DEV_CORE_STATS_INC(rx_nohandler)
+DEV_CORE_STATS_INC(rx_otherhost_dropped)
static __always_inline int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
@@ -3888,12 +3899,6 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
-
-/* Called by rtnetlink.c:rtnl_unlock() */
-void netdev_run_todo(void);
-
static inline void __dev_put(struct net_device *dev)
{
if (dev) {
@@ -4010,10 +4015,7 @@ static inline void dev_replace_track(struct net_device *odev,
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
-
-void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
-void linkwatch_forget_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -4459,9 +4461,6 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
-void dev_addr_flush(struct net_device *dev);
-int dev_addr_init(struct net_device *dev);
-void dev_addr_check(struct net_device *dev);
/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
@@ -4551,7 +4550,6 @@ static inline void __dev_mc_unsync(struct net_device *dev,
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
-void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
@@ -4569,11 +4567,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
extern int netdev_max_backlog;
-extern int netdev_tstamp_prequeue;
-extern int netdev_unregister_timeout_secs;
-extern int weight_p;
-extern int dev_weight_rx_bias;
-extern int dev_weight_tx_bias;
extern int dev_rx_weight;
extern int dev_tx_weight;
extern int gro_normal_batch;
@@ -4761,12 +4754,6 @@ static inline void netdev_rx_csum_fault(struct net_device *dev,
void net_enable_timestamp(void);
void net_disable_timestamp(void);
-#ifdef CONFIG_PROC_FS
-int __init dev_proc_init(void);
-#else
-#define dev_proc_init() 0
-#endif
-
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
bool more)
@@ -4802,8 +4789,6 @@ extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
-void linkwatch_run_queue(void);
-
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 36ca2b5c2253..2d12054932ba 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -65,7 +65,7 @@ extern const int phy_basic_ports_array[3];
extern const int phy_fibre_port_array[1];
extern const int phy_all_ports_features_array[7];
extern const int phy_10_100_features_array[4];
-extern const int phy_basic_t1_features_array[2];
+extern const int phy_basic_t1_features_array[3];
extern const int phy_gbit_features_array[2];
extern const int phy_10gbit_features_array[1];
@@ -570,6 +570,7 @@ struct macsec_ops;
* @autoneg_complete: Flag auto negotiation of the link has completed
* @mdix: Current crossover
* @mdix_ctrl: User setting of crossover
+ * @pma_extable: Cached value of PMA/PMD Extended Abilities Register
* @interrupts: Flag interrupts have been enabled
* @interface: enum phy_interface_t value
* @skb: Netlink message for cable diagnostics
@@ -698,6 +699,8 @@ struct phy_device {
u8 mdix;
u8 mdix_ctrl;
+ int pma_extable;
+
void (*phy_link_change)(struct phy_device *phydev, bool up);
void (*adjust_link)(struct net_device *dev);
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 223781622b33..6d06896fc20d 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -160,11 +160,6 @@ struct phylink_mac_ops {
* clearing unsupported speeds and duplex settings. The port modes
* should not be cleared; phylink_set_port_modes() will help with this.
*
- * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
- * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
- * based on @state->advertising and/or @state->speed and update
- * @state->interface accordingly. See phylink_helper_basex_speed().
- *
* When @config->supported_interfaces has been set, phylink will iterate
* over the supported interfaces to determine the full capability of the
* MAC. The validation function must not print errors if @state->interface
@@ -579,7 +574,6 @@ int phylink_speed_up(struct phylink *pl);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
-void phylink_helper_basex_speed(struct phylink_link_state *state);
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
u16 bmsr, u16 lpa);
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 554454cb8693..e8cc8b6bbf50 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -321,6 +321,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{ return -1; }
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }
diff --git a/include/linux/qed/qed_nvmetcp_ip_services_if.h b/include/linux/qed/qed_nvmetcp_ip_services_if.h
deleted file mode 100644
index 3604aee53796..000000000000
--- a/include/linux/qed/qed_nvmetcp_ip_services_if.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
-/*
- * Copyright 2021 Marvell. All rights reserved.
- */
-
-#ifndef _QED_IP_SERVICES_IF_H
-#define _QED_IP_SERVICES_IF_H
-
-#include <linux/types.h>
-#include <net/route.h>
-#include <net/ip6_route.h>
-#include <linux/inetdevice.h>
-
-int qed_route_ipv4(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev);
-int qed_route_ipv6(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev);
-void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id);
-struct pci_dev *qed_validate_ndev(struct net_device *ndev);
-void qed_return_tcp_port(struct socket *sock);
-int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
- struct socket **sock, u16 *port);
-__be16 qed_get_in_port(struct sockaddr_storage *sa);
-
-#endif /* _QED_IP_SERVICES_IF_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 7f970b16da3a..ae2c6a3cec5d 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -100,6 +100,7 @@ void net_dec_ingress_queue(void);
#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
+void netdev_xmit_skip_txqueue(bool skip);
#endif
void rtnetlink_init(void);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3a30cae8b0a5..3270cb72e4d8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -381,6 +381,19 @@ enum skb_drop_reason {
* the ofo queue, corresponding to
* LINUX_MIB_TCPOFOMERGE
*/
+ SKB_DROP_REASON_TCP_RFC7323_PAWS, /* PAWS check, corresponding to
+ * LINUX_MIB_PAWSESTABREJECTED
+ */
+ SKB_DROP_REASON_TCP_INVALID_SEQUENCE, /* Not acceptable SEQ field */
+ SKB_DROP_REASON_TCP_RESET, /* Invalid RST packet */
+ SKB_DROP_REASON_TCP_INVALID_SYN, /* Incoming packet has unexpected SYN flag */
+ SKB_DROP_REASON_TCP_CLOSE, /* TCP socket in CLOSE state */
+ SKB_DROP_REASON_TCP_FASTOPEN, /* dropped by FASTOPEN request socket */
+ SKB_DROP_REASON_TCP_OLD_ACK, /* TCP ACK is old, but in window */
+ SKB_DROP_REASON_TCP_TOO_OLD_ACK, /* TCP ACK is too old */
+ SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, /* TCP ACK for data we haven't sent yet */
+ SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE, /* pruned from TCP OFO queue */
+ SKB_DROP_REASON_TCP_OFO_DROP, /* data already in receive queue */
SKB_DROP_REASON_IP_OUTNOROUTES, /* route lookup failed */
SKB_DROP_REASON_BPF_CGROUP_EGRESS, /* dropped by
* BPF_PROG_TYPE_CGROUP_SKB
@@ -408,11 +421,9 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_XDP, /* dropped by XDP in input path */
SKB_DROP_REASON_TC_INGRESS, /* dropped in TC ingress HOOK */
- SKB_DROP_REASON_PTYPE_ABSENT, /* not packet_type found to handle
- * the skb. For an etner packet,
- * this means that L3 protocol is
- * not supported
- */
+ SKB_DROP_REASON_UNHANDLED_PROTO, /* protocol not implemented
+ * or not supported
+ */
SKB_DROP_REASON_SKB_CSUM, /* sk_buff checksum computation
* error
*/
@@ -444,9 +455,35 @@ enum skb_drop_reason {
SKB_DROP_REASON_TAP_TXFILTER, /* dropped by tx filter implemented
* at tun/tap, e.g., check_filter()
*/
+ SKB_DROP_REASON_ICMP_CSUM, /* ICMP checksum error */
+ SKB_DROP_REASON_INVALID_PROTO, /* the packet doesn't follow RFC
+ * 2211, such as a broadcast
+ * ICMP_TIMESTAMP
+ */
+ SKB_DROP_REASON_IP_INADDRERRORS, /* host unreachable, corresponding
+ * to IPSTATS_MIB_INADDRERRORS
+ */
+ SKB_DROP_REASON_IP_INNOROUTES, /* network unreachable, corresponding
+ * to IPSTATS_MIB_INNOROUTES
+ */
+ SKB_DROP_REASON_PKT_TOO_BIG, /* packet size is too big (e.g. it
+ * exceeds the MTU)
+ */
SKB_DROP_REASON_MAX,
};
+#define SKB_DR_INIT(name, reason) \
+ enum skb_drop_reason name = SKB_DROP_REASON_##reason
+#define SKB_DR(name) \
+ SKB_DR_INIT(name, NOT_SPECIFIED)
+#define SKB_DR_SET(name, reason) \
+ (name = SKB_DROP_REASON_##reason)
+#define SKB_DR_OR(name, reason) \
+ do { \
+ if (name == SKB_DROP_REASON_NOT_SPECIFIED) \
+ SKB_DR_SET(name, reason); \
+ } while (0)
+
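The SKB_DR*() helpers carry a drop reason through a function: start at NOT_SPECIFIED, refine it on each failure path, and let SKB_DR_OR() supply a fallback only if nothing more precise was recorded. A hedged sketch; the handler and the chosen reasons are purely illustrative:

	static void my_rx_handler(struct sk_buff *skb)
	{
		SKB_DR(reason);			/* NOT_SPECIFIED by default */

		if (skb->protocol != htons(ETH_P_IP))
			goto drop;		/* reason left unspecified here */
		if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
			SKB_DR_SET(reason, PKT_TOO_SMALL);
			goto drop;
		}
		if (skb->pkt_type == PACKET_OTHERHOST) {
			SKB_DR_SET(reason, OTHERHOST);
			goto drop;
		}
		/* normal processing would continue here */
		return;

	drop:
		/* only overrides reasons that are still NOT_SPECIFIED */
		SKB_DR_OR(reason, UNHANDLED_PROTO);
		kfree_skb_reason(skb, reason);
	}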
/* To allow 64K frame to be packed as single skb without frag_list we
* require 64K/PAGE_SIZE pages plus 1 additional page to allow for
* buffers which do not start on a page boundary.
@@ -647,20 +684,6 @@ struct ubuf_info {
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
-struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
-struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
-
-void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
- bool success);
-
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
-int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
- struct msghdr *msg, int len,
- struct ubuf_info *uarg);
-
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
@@ -851,6 +874,7 @@ typedef unsigned char *sk_buff_data_t;
* delivery_time at egress.
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
+ * @alloc_cpu: CPU which did the skb allocation.
* @secmark: security marking
* @mark: Generic packet mark
* @reserved_tailroom: (aka @mark) number of bytes of free space available
@@ -1043,6 +1067,7 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
+ u16 alloc_cpu;
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
#endif
@@ -1284,6 +1309,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
+void skb_attempt_defer_free(struct sk_buff *skb);
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
@@ -1639,6 +1665,28 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
}
#endif
+struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
+struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg);
+
+void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+
+void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success);
+
+int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+
+static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
+ struct msghdr *msg, int len)
+{
+ return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
+}
+
+int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
+ struct msghdr *msg, int len,
+ struct ubuf_info *uarg);
+
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
@@ -3836,8 +3884,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
- int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
@@ -3886,7 +3933,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
-int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..7e00cca06709
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -0,0 +1,131 @@
+#ifndef __MTK_WED_H
+#define __MTK_WED_H
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/regmap.h>
+#include <linux/pci.h>
+
+#define MTK_WED_TX_QUEUES 2
+
+struct mtk_wed_hw;
+struct mtk_wdma_desc;
+
+struct mtk_wed_ring {
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ int size;
+
+ u32 reg_base;
+ void __iomem *wpdma;
+};
+
+struct mtk_wed_device {
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ const struct mtk_wed_ops *ops;
+ struct device *dev;
+ struct mtk_wed_hw *hw;
+ bool init_done, running;
+ int wdma_idx;
+ int irq;
+
+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring txfree_ring;
+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+
+ struct {
+ int size;
+ void **pages;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ } buf_ring;
+
+ /* filled by driver: */
+ struct {
+ struct pci_dev *pci_dev;
+
+ u32 wpdma_phys;
+
+ u16 token_start;
+ unsigned int nbuf;
+
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
+ } wlan;
+#endif
+};
+
+struct mtk_wed_ops {
+ int (*attach)(struct mtk_wed_device *dev);
+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
+ void (*detach)(struct mtk_wed_device *dev);
+
+ void (*stop)(struct mtk_wed_device *dev);
+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
+ void (*reset_dma)(struct mtk_wed_device *dev);
+
+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
+
+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+};
+
+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+
+static inline int
+mtk_wed_device_attach(struct mtk_wed_device *dev)
+{
+ int ret = -ENODEV;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ rcu_read_lock();
+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
+ if (dev->ops)
+ ret = dev->ops->attach(dev);
+ else
+ rcu_read_unlock();
+
+ if (ret)
+ dev->ops = NULL;
+#endif
+
+ return ret;
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+#define mtk_wed_device_active(_dev) !!(_dev)->ops
+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
+#define mtk_wed_device_reg_read(_dev, _reg) \
+ (_dev)->ops->reg_read(_dev, _reg)
+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
+ (_dev)->ops->reg_write(_dev, _reg, _val)
+#define mtk_wed_device_irq_get(_dev, _mask) \
+ (_dev)->ops->irq_get(_dev, _mask)
+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
+ (_dev)->ops->irq_set_mask(_dev, _mask)
+#else
+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+{
+ return false;
+}
+#define mtk_wed_device_detach(_dev) do {} while (0)
+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
+#define mtk_wed_device_reg_read(_dev, _reg) 0
+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
+#define mtk_wed_device_irq_get(_dev, _mask) 0
+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#endif
+
+#endif
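
Usage sketch, not part of the patch: a WLAN driver fills the wlan sub-struct declared above and then attaches. All names, addresses and sizes below are placeholders rather than code from a real driver.

#include <linux/soc/mediatek/mtk_wed.h>

static u32 example_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	/* Fill one TX buffer descriptor for this token; the layout is WLAN specific. */
	return 0;
}

static int example_offload_enable(struct mtk_wed_device *wed)
{
	/* Tell the WLAN firmware to hand TX tokens to WED. */
	return 0;
}

static void example_offload_disable(struct mtk_wed_device *wed)
{
	/* Fall back to the normal, non-offloaded TX path. */
}

static int example_wed_init(struct mtk_wed_device *wed, struct pci_dev *pdev,
			    void __iomem *tx_ring_regs)
{
	int ret;

	wed->wlan.pci_dev = pdev;
	wed->wlan.wpdma_phys = 0x10000000;	/* placeholder WPDMA base */
	wed->wlan.token_start = 0;
	wed->wlan.nbuf = 1024;
	wed->wlan.init_buf = example_init_buf;
	wed->wlan.offload_enable = example_offload_enable;
	wed->wlan.offload_disable = example_offload_disable;

	ret = mtk_wed_device_attach(wed);
	if (ret)
		return ret;	/* -ENODEV: no WED hardware, keep the normal TX path */

	return mtk_wed_device_tx_ring_setup(wed, 0, tx_ring_regs);
}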
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6f85f5d957ef..12085c9a8544 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -50,6 +50,9 @@ struct linger {
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
+
+ int msg_inq; /* output, data left in socket */
+
struct iov_iter msg_iter; /* data */
/*
@@ -62,8 +65,9 @@ struct msghdr {
void __user *msg_control_user;
};
bool msg_control_is_user : 1;
- __kernel_size_t msg_controllen; /* ancillary data buffer length */
+ bool msg_get_inq : 1; /* return INQ after receive */
unsigned int msg_flags; /* flags on received message */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6353d6db69b2..80263f7cdb77 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -38,10 +38,10 @@ struct ctl_table_header;
struct ctl_dir;
/* Keep the same order as in fs/proc/proc_sysctl.c */
-#define SYSCTL_NEG_ONE ((void *)&sysctl_vals[0])
-#define SYSCTL_ZERO ((void *)&sysctl_vals[1])
-#define SYSCTL_ONE ((void *)&sysctl_vals[2])
-#define SYSCTL_TWO ((void *)&sysctl_vals[3])
+#define SYSCTL_ZERO ((void *)&sysctl_vals[0])
+#define SYSCTL_ONE ((void *)&sysctl_vals[1])
+#define SYSCTL_TWO ((void *)&sysctl_vals[2])
+#define SYSCTL_THREE ((void *)&sysctl_vals[3])
#define SYSCTL_FOUR ((void *)&sysctl_vals[4])
#define SYSCTL_ONE_HUNDRED ((void *)&sysctl_vals[5])
#define SYSCTL_TWO_HUNDRED ((void *)&sysctl_vals[6])
@@ -51,6 +51,7 @@ struct ctl_dir;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
#define SYSCTL_MAXOLDUID ((void *)&sysctl_vals[10])
+#define SYSCTL_NEG_ONE ((void *)&sysctl_vals[11])
extern const int sysctl_vals[];
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index 809bccd08455..cc42db51bbba 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -197,6 +197,7 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */
/* Flags for driver_info::data */
#define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */
+#define RNDIS_DRIVER_DATA_DST_MAC_FIXUP 2 /* device ignores configured MAC address */
extern void rndis_status(struct usbnet *dev, struct urb *urb);
extern int
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 8336e86ce606..1b4d72d5e891 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -214,6 +214,7 @@ extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_status(struct usbnet *, struct urb *);
+extern int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3049cb69c025..9cf6870b526e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -134,7 +134,8 @@ struct tc_action_ops {
(*get_psample_group)(const struct tc_action *a,
tc_action_priv_destructor *destructor);
int (*offload_act_setup)(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind);
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack);
};
struct tc_action_net {
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 833672d6fbe4..85f9e8417688 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -203,8 +203,8 @@ struct wpan_phy {
/* PHY depended MAC PIB values */
- /* 802.15.4 acronym: Tdsym in usec */
- u8 symbol_duration;
+ /* 802.15.4 acronym: Tdsym in nsec */
+ u32 symbol_duration;
/* lifs and sifs periods timing */
u16 lifs_period;
u16 sifs_period;
@@ -415,4 +415,6 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy)
return dev_name(&phy->dev);
}
+void ieee802154_configure_durations(struct wpan_phy *phy);
+
#endif /* __NET_CFG802154_H */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index a30180c0988a..062895973656 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -22,6 +22,7 @@
#include <linux/firmware.h>
struct devlink;
+struct devlink_linecard;
struct devlink_port_phys_attrs {
u32 port_number; /* Same value as "split group".
@@ -135,6 +136,7 @@ struct devlink_port {
struct mutex reporters_lock; /* Protects reporter_list */
struct devlink_rate *devlink_rate;
+ struct devlink_linecard *linecard;
};
struct devlink_port_new_attrs {
@@ -148,6 +150,51 @@ struct devlink_port_new_attrs {
sfnum_valid:1;
};
+struct devlink_info_req;
+struct devlink_linecard_device;
+
+/**
+ * struct devlink_linecard_ops - Linecard operations
+ * @provision: callback to provision the linecard slot with a certain
+ * type of linecard. As a result of this operation, the driver
+ * is expected to eventually (possibly after the function call
+ * returns) call one of:
+ * devlink_linecard_provision_set()
+ * devlink_linecard_provision_fail()
+ * @unprovision: callback to unprovision the linecard slot. As a result
+ * of this operation, the driver is expected to eventually
+ * (possibly after the function call returns) call one of:
+ * devlink_linecard_provision_clear()
+ * devlink_linecard_provision_fail()
+ * @same_provision: callback to ask the driver whether the linecard is
+ * already provisioned in the same way the user is asking this
+ * linecard to be provisioned.
+ * @types_count: callback to get number of supported types
+ * @types_get: callback to get next type in list
+ * @info_get: callback to get linecard info
+ * @device_info_get: callback to get linecard device info
+ */
+struct devlink_linecard_ops {
+ int (*provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv,
+ struct netlink_ext_ack *extack);
+ int (*unprovision)(struct devlink_linecard *linecard, void *priv,
+ struct netlink_ext_ack *extack);
+ bool (*same_provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv);
+ unsigned int (*types_count)(struct devlink_linecard *linecard,
+ void *priv);
+ void (*types_get)(struct devlink_linecard *linecard,
+ void *priv, unsigned int index, const char **type,
+ const void **type_priv);
+ int (*info_get)(struct devlink_linecard *linecard, void *priv,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+ int (*device_info_get)(struct devlink_linecard_device *device,
+ void *priv, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+};
+
struct devlink_sb_pool_info {
enum devlink_sb_pool_type pool_type;
u32 size;
@@ -592,7 +639,6 @@ struct devlink_flash_update_params {
#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(1)
struct devlink_region;
-struct devlink_info_req;
/**
* struct devlink_region_ops - Region operations
@@ -1536,6 +1582,24 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
int devlink_rate_leaf_create(struct devlink_port *port, void *priv);
void devlink_rate_leaf_destroy(struct devlink_port *devlink_port);
void devlink_rate_nodes_destroy(struct devlink *devlink);
+void devlink_port_linecard_set(struct devlink_port *devlink_port,
+ struct devlink_linecard *linecard);
+struct devlink_linecard *
+devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv);
+void devlink_linecard_destroy(struct devlink_linecard *linecard);
+struct devlink_linecard_device *
+devlink_linecard_device_create(struct devlink_linecard *linecard,
+ unsigned int device_index, void *priv);
+void
+devlink_linecard_device_destroy(struct devlink_linecard *linecard,
+ struct devlink_linecard_device *linecard_device);
+void devlink_linecard_provision_set(struct devlink_linecard *linecard,
+ const char *type);
+void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
+void devlink_linecard_provision_fail(struct devlink_linecard *linecard);
+void devlink_linecard_activate(struct devlink_linecard *linecard);
+void devlink_linecard_deactivate(struct devlink_linecard *linecard);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
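
Driver-side sketch, not taken from this patch: a driver creates the linecard object with the ops above and reports provisioning results through the new helpers. The callback bodies and the "16x100G" type string are illustrative placeholders.

#include <net/devlink.h>

static int example_lc_provision(struct devlink_linecard *lc, void *priv,
				const char *type, const void *type_priv,
				struct netlink_ext_ack *extack)
{
	/* Start provisioning; report completion (possibly later, from a workqueue). */
	devlink_linecard_provision_set(lc, type);
	return 0;
}

static int example_lc_unprovision(struct devlink_linecard *lc, void *priv,
				  struct netlink_ext_ack *extack)
{
	devlink_linecard_provision_clear(lc);
	return 0;
}

static unsigned int example_lc_types_count(struct devlink_linecard *lc, void *priv)
{
	return 1;
}

static void example_lc_types_get(struct devlink_linecard *lc, void *priv,
				 unsigned int index, const char **type,
				 const void **type_priv)
{
	*type = "16x100G";
	*type_priv = NULL;
}

static const struct devlink_linecard_ops example_lc_ops = {
	.provision	= example_lc_provision,
	.unprovision	= example_lc_unprovision,
	.types_count	= example_lc_types_count,
	.types_get	= example_lc_types_get,
};

static struct devlink_linecard *
example_linecard_register(struct devlink *devlink, unsigned int index, void *priv)
{
	return devlink_linecard_create(devlink, index, &example_lc_ops, priv);
}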
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 9f65f1bfbd24..a4c6057c7097 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -253,6 +253,14 @@ struct flow_dissector_key_hash {
u32 hash;
};
+/**
+ * struct flow_dissector_key_num_of_vlans:
+ * @num_of_vlans: num_of_vlans value
+ */
+struct flow_dissector_key_num_of_vlans {
+ u8 num_of_vlans;
+};
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -282,6 +290,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */
FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */
FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */
FLOW_DISSECTOR_KEY_MAX,
};
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 4cfdef6ca4f6..c8490729b4ae 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -64,6 +64,14 @@ struct inet6_ifaddr {
struct hlist_node addr_lst;
struct list_head if_list;
+ /*
+ * Used to safely traverse idev->addr_list in process context
+ * if the idev->lock needed to protect idev->addr_list cannot be held.
+ * In that case, add the items to this list temporarily and iterate
+ * without holding idev->lock.
+ * See addrconf_ifdown and dev_forward_change.
+ */
+ struct list_head if_list_aux;
struct list_head tmp_list;
struct inet6_ifaddr *ifpub;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 6a82bcb8813b..a378eff827c7 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -212,7 +212,7 @@ struct fib_rt_info {
u32 tb_id;
__be32 dst;
int dst_len;
- u8 tos;
+ dscp_t dscp;
u8 type;
u8 offload:1,
trap:1,
@@ -225,7 +225,7 @@ struct fib_entry_notifier_info {
u32 dst;
int dst_len;
struct fib_info *fi;
- u8 tos;
+ dscp_t dscp;
u8 type;
u32 tb_id;
};
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 382ebb862ea8..75880fc70700 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1201,9 +1201,9 @@ static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb
* in the TX status but the rate control information (it does clear
* the count since you need to fill that in anyway).
*
- * NOTE: You can only use this function if you do NOT use
- * info->driver_data! Use info->rate_driver_data
- * instead if you need only the less space that allows.
+ * NOTE: While the rates array is kept intact, this will wipe all of the
+ * driver_data fields in info, so it's up to the driver to restore
+ * any fields it needs after calling this helper.
*/
static inline void
ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
@@ -2056,6 +2056,45 @@ struct ieee80211_sta_txpwr {
enum nl80211_tx_power_setting type;
};
+#define MAX_STA_LINKS 15
+
+/**
+ * struct ieee80211_link_sta - station link-specific info
+ * All link-specific info for a STA link is stored here; a non-MLD STA
+ * has a single entry and an MLD STA has one entry per link.
+ *
+ * @addr: MAC address of the link STA. For a non-MLO STA this is the same as
+ * the addr in ieee80211_sta. For an MLO link STA it may or may not be the
+ * same as the ieee80211_sta addr (which represents the MLD STA address).
+ * @supp_rates: Bitmap of supported rates
+ * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
+ * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
+ * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
+ * @eht_cap: EHT capabilities of this STA
+ * @bandwidth: current bandwidth the station can receive with
+ * @rx_nss: in HT/VHT, the maximum number of spatial streams the
+ * station can receive at the moment, changed by operating mode
+ * notifications and capabilities. The value is only valid after
+ * the station moves to associated state.
+ * @txpwr: the station tx power configuration
+ *
+ */
+struct ieee80211_link_sta {
+ u8 addr[ETH_ALEN];
+
+ u32 supp_rates[NUM_NL80211_BANDS];
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+
+ u8 rx_nss;
+ enum ieee80211_sta_rx_bandwidth bandwidth;
+ struct ieee80211_sta_txpwr txpwr;
+};
+
/**
* struct ieee80211_sta - station table entry
*
@@ -2065,15 +2104,11 @@ struct ieee80211_sta_txpwr {
* either be protected by rcu_read_lock() explicitly or implicitly,
* or you must take good care to not use such a pointer after a
* call to your sta_remove callback that removed it.
+ * In the case of an MLO association this also represents the MLD STA
+ * and holds pointers to the various link STAs.
*
* @addr: MAC address
* @aid: AID we assigned to the station if we're an AP
- * @supp_rates: Bitmap of supported rates (per band)
- * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
- * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @he_cap: HE capabilities of this STA
- * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
- * @eht_cap: EHT capabilities of this STA
* @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
* that this station is allowed to transmit to us.
* Can be modified by driver.
@@ -2085,11 +2120,6 @@ struct ieee80211_sta_txpwr {
* if wme is supported. The bits order is like in
* IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
* @max_sp: max Service Period. Only valid if wme is supported.
- * @bandwidth: current bandwidth the station can receive with
- * @rx_nss: in HT/VHT, the maximum number of spatial streams the
- * station can receive at the moment, changed by operating mode
- * notifications and capabilities. The value is only valid after
- * the station moves to associated state.
* @smps_mode: current SMPS mode (off, static or dynamic)
* @rates: rate control selection table
* @tdls: indicates whether the STA is a TDLS peer
@@ -2102,25 +2132,28 @@ struct ieee80211_sta_txpwr {
* @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
* @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
* @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
- * @txpwr: the station tx power configuration
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
* the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
+ * @multi_link_sta: Indicates whether this STA is an MLD STA
+ * @deflink: Holds the default link STA information. For a non-MLO STA all
+ * link-specific information is accessed through @deflink, or through
+ * link[0], which points to @deflink. For an MLO STA the first link STA
+ * added points to @deflink.
+ * @link: References to the link STA entries. For a non-MLO STA all entries
+ * except link[0] are NULL by default and link information is accessed
+ * via @deflink or link[0]. For an MLO STA the first link STA added has
+ * its link pointer set to the address of @deflink; the remaining entries
+ * are allocated and assigned to link[link_id], where link_id is the ID
+ * assigned by the AP.
*/
struct ieee80211_sta {
- u32 supp_rates[NUM_NL80211_BANDS];
u8 addr[ETH_ALEN];
u16 aid;
- struct ieee80211_sta_ht_cap ht_cap;
- struct ieee80211_sta_vht_cap vht_cap;
- struct ieee80211_sta_he_cap he_cap;
- struct ieee80211_he_6ghz_capa he_6ghz_capa;
- struct ieee80211_sta_eht_cap eht_cap;
u16 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
- u8 rx_nss;
- enum ieee80211_sta_rx_bandwidth bandwidth;
enum ieee80211_smps_mode smps_mode;
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
@@ -2147,10 +2180,13 @@ struct ieee80211_sta {
bool support_p2p_ps;
u16 max_rc_amsdu_len;
u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
- struct ieee80211_sta_txpwr txpwr;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
+ bool multi_link_sta;
+ struct ieee80211_link_sta deflink;
+ struct ieee80211_link_sta *link[MAX_STA_LINKS];
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -2434,6 +2470,9 @@ struct ieee80211_txq {
* usage and 802.11 frames with %RX_FLAG_ONLY_MONITOR set for monitor to
* the stack.
*
+ * @IEEE80211_HW_DETECTS_COLOR_COLLISION: HW/driver has support for BSS color
+ * collision detection and doesn't need it in software.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2489,6 +2528,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD,
IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP,
+ IEEE80211_HW_DETECTS_COLOR_COLLISION,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -6367,7 +6407,7 @@ static inline int rate_supported(struct ieee80211_sta *sta,
enum nl80211_band band,
int index)
{
- return (sta == NULL || sta->supp_rates[band] & BIT(index));
+ return (sta == NULL || sta->deflink.supp_rates[band] & BIT(index));
}
static inline s8
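
Conversion sketch, not from this patch: drivers that used to read the per-station capability fields directly now go through the default link, e.g.:

#include <net/mac80211.h>

static bool example_sta_supports_ht(struct ieee80211_sta *sta,
				    enum nl80211_band band)
{
	/* Formerly sta->ht_cap.ht_supported and sta->supp_rates[band]. */
	return sta->deflink.ht_cap.ht_supported &&
	       sta->deflink.supp_rates[band];
}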
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 2c3bbc6645ba..bdac0ddbdcdb 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -498,4 +498,23 @@ void ieee802154_stop_queue(struct ieee802154_hw *hw);
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
bool ifs_handling);
+/**
+ * ieee802154_xmit_error - offloaded frame transmission failed
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @reason: error code
+ */
+void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
+ int reason);
+
+/**
+ * ieee802154_xmit_hw_error - frame could not be offloaded to the transmitter
+ * because of a hardware error (bus error, timeout, etc.)
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ */
+void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb);
+
#endif /* NET_MAC802154_H */
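
Usage sketch, not part of the patch: a transceiver driver reports asynchronous TX failures from its completion path. The split between the two helpers below follows only the kernel-doc above and is an assumption.

#include <net/mac802154.h>

static void example_tx_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
				bool bus_error, int status)
{
	if (bus_error)
		ieee802154_xmit_hw_error(hw, skb);	/* frame never reached the air */
	else if (status)
		ieee802154_xmit_error(hw, skb, status);
	else
		ieee802154_xmit_complete(hw, skb, false);
}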
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 0a3b0fb04a3b..8b1afd6f5cc4 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -35,7 +35,8 @@ struct mptcp_ext {
frozen:1,
reset_transient:1;
u8 reset_reason:4,
- csum_reqd:1;
+ csum_reqd:1,
+ infinite_map:1;
};
#define MPTCP_RM_IDS_MAX 8
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index b08b70989d2c..69e6c6a218be 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -43,6 +43,11 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
+struct nf_conntrack_net_ecache {
+ struct delayed_work dwork;
+ struct netns_ct *ct_net;
+};
+
struct nf_conntrack_net {
/* only used when new connection is allocated: */
atomic_t count;
@@ -58,8 +63,7 @@ struct nf_conntrack_net {
struct ctl_table_header *sysctl_header;
#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct delayed_work ecache_dwork;
- struct netns_ct *ct_net;
+ struct nf_conntrack_net_ecache ecache;
#endif
};
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index ea5fb70e5101..813c93499f20 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -117,6 +117,10 @@ struct page_pool_stats {
struct page_pool_recycle_stats recycle_stats;
};
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
/*
* Drivers that wish to harvest page pool stats and report them to users
* (perhaps via ethtool, debugfs, or another mechanism) can allocate a
@@ -124,6 +128,23 @@ struct page_pool_stats {
*/
bool page_pool_get_stats(struct page_pool *pool,
struct page_pool_stats *stats);
+#else
+
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+ return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ return data;
+}
+
#endif
struct page_pool {
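
Driver-side sketch, not from this patch, of wiring the new helpers into ethtool; the per-queue pool walk is elided and all names are placeholders.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <net/page_pool.h>

static int example_get_sset_count(struct net_device *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return page_pool_ethtool_stats_get_count();
}

static void example_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		data = page_pool_ethtool_stats_get_strings(data);
	/* driver-private strings would continue from the returned pointer */
}

static void example_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct page_pool_stats pp_stats = {};

	/* page_pool_get_stats(rxq->page_pool, &pp_stats) for each RX queue... */
	data = page_pool_ethtool_stats_get(data, &pp_stats);
}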
diff --git a/include/net/ping.h b/include/net/ping.h
index 2fe78874318c..e4ff3911cbf5 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -71,12 +71,12 @@ void ping_err(struct sk_buff *skb, int offset, u32 info);
int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
-int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-bool ping_rcv(struct sk_buff *skb);
+enum skb_drop_reason ping_rcv(struct sk_buff *skb);
#ifdef CONFIG_PROC_FS
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a3b57a93228a..8cf001aed858 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -547,10 +547,12 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
int tc_setup_offload_action(struct flow_action *flow_action,
- const struct tcf_exts *exts);
+ const struct tcf_exts *exts,
+ struct netlink_ext_ack *extack);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
- struct tc_action *actions[]);
+ struct tc_action *actions[],
+ struct netlink_ext_ack *extack);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
void *type_data, bool err_stop, bool rtnl_held);
diff --git a/include/net/route.h b/include/net/route.h
index 25404fc2b483..991a3985712d 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -43,6 +43,19 @@
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
+static inline __u8 ip_sock_rt_scope(const struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_LOCALROUTE))
+ return RT_SCOPE_LINK;
+
+ return RT_SCOPE_UNIVERSE;
+}
+
+static inline __u8 ip_sock_rt_tos(const struct sock *sk)
+{
+ return RT_TOS(inet_sk(sk)->tos);
+}
+
struct ip_tunnel_info;
struct fib_nh;
struct fib_info;
@@ -289,39 +302,38 @@ static inline char rt_tos2priority(u8 tos)
* ip_route_newports() calls.
*/
-static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src,
- u32 tos, int oif, u8 protocol,
+static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
__be16 sport, __be16 dport,
- struct sock *sk)
+ const struct sock *sk)
{
__u8 flow_flags = 0;
if (inet_sk(sk)->transparent)
flow_flags |= FLOWI_FLAG_ANYSRC;
- flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
- protocol, flow_flags, dst, src, dport, sport,
- sk->sk_uid);
+ flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk),
+ ip_sock_rt_scope(sk), protocol, flow_flags, dst,
+ src, dport, sport, sk->sk_uid);
}
-static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
- __be32 dst, __be32 src, u32 tos,
- int oif, u8 protocol,
+static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
__be16 sport, __be16 dport,
struct sock *sk)
{
struct net *net = sock_net(sk);
struct rtable *rt;
- ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
- sport, dport, sk);
+ ip_route_connect_init(fl4, dst, src, oif, protocol, sport, dport, sk);
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
return rt;
ip_rt_put(rt);
- flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
+ flowi4_update_output(fl4, oif, fl4->flowi4_tos, fl4->daddr,
+ fl4->saddr);
}
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
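
Caller-side sketch, not from this patch: the tos argument is gone, and both tos and scope are now derived from the socket itself.

#include <net/route.h>

static int example_connect_route(struct sock *sk, struct flowi4 *fl4,
				 __be32 daddr, __be32 saddr,
				 __be16 sport, __be16 dport)
{
	struct rtable *rt;

	rt = ip_route_connect(fl4, daddr, saddr, sk->sk_bound_dev_if,
			      IPPROTO_TCP, sport, dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	ip_rt_put(rt);
	return 0;
}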
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 9f48733bfd21..bf8bb3357825 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -10,9 +10,23 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
- RTNL_FLAG_DOIT_UNLOCKED = 1,
+ RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+ RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
};
+enum rtnl_kinds {
+ RTNL_KIND_NEW,
+ RTNL_KIND_DEL,
+ RTNL_KIND_GET,
+ RTNL_KIND_SET
+};
+#define RTNL_KIND_MASK 0x3
+
+static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
+{
+ return msgtype & RTNL_KIND_MASK;
+}
+
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_register_module(struct module *owner, int protocol, int msgtype,
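
Illustration only, not part of the patch: the kind is encoded in the two low bits of the message type, so the rtnetlink core can gate bulk deletes roughly like this.

#include <linux/netlink.h>
#include <net/rtnetlink.h>

static bool example_bulk_del_allowed(const struct nlmsghdr *nlh,
				     unsigned int handler_flags)
{
	return rtnl_msgtype_kind(nlh->nlmsg_type) == RTNL_KIND_DEL &&
	       (handler_flags & RTNL_FLAG_BULK_DEL_SUPPORTED);
}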
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index bf3716fe83e0..a04999ee99b0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -103,7 +103,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
-struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int *);
typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
void sctp_transport_walk_start(struct rhashtable_iter *iter);
diff --git a/include/net/sock.h b/include/net/sock.h
index c4b91fc19b9c..73063c88a249 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -292,7 +292,6 @@ struct sk_filter;
* @sk_pacing_shift: scaling factor for TCP Small Queues
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
- * @defer_list: head of llist storing skbs to be freed
* @sk_callback_lock: used with the callbacks in the end of this struct
* @sk_error_queue: rarely used
* @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
@@ -417,7 +416,6 @@ struct sock {
struct sk_buff *head;
struct sk_buff *tail;
} sk_backlog;
- struct llist_head defer_list;
#define sk_rmem_alloc sk_backlog.rmem_alloc
@@ -895,6 +893,7 @@ enum sock_flags {
SOCK_TXTIME,
SOCK_XDP, /* XDP is attached */
SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
+ SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1202,8 +1201,7 @@ struct proto {
int (*sendmsg)(struct sock *sk, struct msghdr *msg,
size_t len);
int (*recvmsg)(struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags,
- int *addr_len);
+ size_t len, int flags, int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int (*bind)(struct sock *sk,
@@ -1825,11 +1823,17 @@ int sock_getsockopt(struct socket *sock, int level, int op,
char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
bool timeval, bool time32);
-struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
- int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
int *errcode, int max_page_order);
+
+static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
+ unsigned long size,
+ int noblock, int *errcode)
+{
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
+}
+
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
@@ -2392,7 +2396,14 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
void (*destructor)(struct sock *sk,
struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason);
+
+static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return sock_queue_rcv_skb_reason(sk, skb, NULL);
+}
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
@@ -2643,20 +2654,21 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
__sock_recv_wifi_status(msg, sk, skb);
}
-void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
+void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
-static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb)
+static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
{
-#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
- (1UL << SOCK_RCVTSTAMP))
+#define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \
+ (1UL << SOCK_RCVTSTAMP) | \
+ (1UL << SOCK_RCVMARK))
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
- __sock_recv_ts_and_drops(msg, sk, skb);
+ if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
+ __sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
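
Protocol-side sketch, not from this patch; it assumes the reason is filled in only on failure, so a receive handler can forward it to kfree_skb_reason().

#include <linux/netdevice.h>
#include <net/sock.h>

static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason reason;

	if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
		kfree_skb_reason(skb, reason);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}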
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 732b7097d78e..a191486eb1e4 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -70,6 +70,10 @@ struct sk_skb_cb {
* when dst_reg == src_reg.
*/
u64 temp_reg;
+ struct tls_msg {
+ u8 control;
+ u8 decrypted;
+ } tls;
};
static inline struct strp_msg *strp_msg(struct sk_buff *skb)
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index eb8f01c819e6..832efd40e023 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -59,4 +59,19 @@ static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
}
+static inline bool is_tcf_gact_continue(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_UNSPEC, false);
+}
+
+static inline bool is_tcf_gact_reclassify(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_RECLASSIFY, false);
+}
+
+static inline bool is_tcf_gact_pipe(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_PIPE, false);
+}
+
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 00bfee70609e..dc1079f28e13 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -17,6 +17,7 @@ struct tcf_skbedit_params {
u32 mark;
u32 mask;
u16 queue_mapping;
+ u16 mapping_mod;
u16 ptype;
struct rcu_head rcu;
};
@@ -94,4 +95,16 @@ static inline u32 tcf_skbedit_priority(const struct tc_action *a)
return priority;
}
+/* Return true iff action is queue_mapping */
+static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING);
+}
+
+/* Return true iff action is inheritdsfield */
+static inline bool is_tcf_skbedit_inheritdsfield(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_INHERITDSFIELD);
+}
+
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cc1295037533..1e99f5c61f84 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -407,7 +407,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
-int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
@@ -1142,15 +1142,6 @@ static inline bool tcp_ca_needs_ecn(const struct sock *sk)
return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}
-static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- if (icsk->icsk_ca_ops->set_state)
- icsk->icsk_ca_ops->set_state(sk, ca_state);
- icsk->icsk_ca_state = ca_state;
-}
-
static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1159,6 +1150,9 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_cong.c */
+void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
+
/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
@@ -1215,9 +1209,20 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
#define TCP_INFINITE_SSTHRESH 0x7fffffff
+static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd;
+}
+
+static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
+{
+ WARN_ON_ONCE((int)val <= 0);
+ tp->snd_cwnd = val;
+}
+
static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
- return tp->snd_cwnd < tp->snd_ssthresh;
+ return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
@@ -1243,8 +1248,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
return tp->snd_ssthresh;
else
return max(tp->snd_ssthresh,
- ((tp->snd_cwnd >> 1) +
- (tp->snd_cwnd >> 2)));
+ ((tcp_snd_cwnd(tp) >> 1) +
+ (tcp_snd_cwnd(tp) >> 2)));
}
/* Use define here intentionally to get WARN_ON location shown at the caller */
@@ -1286,7 +1291,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
if (tcp_in_slow_start(tp))
- return tp->snd_cwnd < 2 * tp->max_packets_out;
+ return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
return tp->is_cwnd_limited;
}
@@ -1378,18 +1383,6 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason);
-#ifdef CONFIG_INET
-void __sk_defer_free_flush(struct sock *sk);
-
-static inline void sk_defer_free_flush(struct sock *sk)
-{
- if (llist_empty(&sk->defer_list))
- return;
- __sk_defer_free_flush(sk);
-}
-#else
-static inline void sk_defer_free_flush(struct sock *sk) {}
-#endif
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
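
Sketch of a converted congestion-control module, not taken from this patch: direct accesses to tp->snd_cwnd become tcp_snd_cwnd()/tcp_snd_cwnd_set().

#include <net/tcp.h>

static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		acked = tcp_slow_start(tp, acked);
	if (acked)
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
}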
diff --git a/include/net/tls.h b/include/net/tls.h
index b6968a5b5538..b59f0a63292b 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -64,6 +64,7 @@
#define TLS_AAD_SPACE_SIZE 13
#define MAX_IV_SIZE 16
+#define TLS_TAG_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes.
@@ -117,11 +118,6 @@ struct tls_rec {
u8 aead_req_ctx[];
};
-struct tls_msg {
- struct strp_msg rxm;
- u8 control;
-};
-
struct tx_work {
struct delayed_work work;
struct sock *sk;
@@ -152,13 +148,10 @@ struct tls_sw_context_rx {
void (*saved_data_ready)(struct sock *sk);
struct sk_buff *recv_pkt;
- u8 control;
u8 async_capable:1;
- u8 decrypted:1;
atomic_t decrypt_pending;
/* protect crypto_wait with decrypt_pending*/
spinlock_t decrypt_compl_lock;
- bool async_notify;
};
struct tls_record_info {
@@ -378,7 +371,7 @@ void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len);
+ int flags, int *addr_len);
bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
@@ -411,7 +404,9 @@ void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
- return (struct tls_msg *)strp_msg(skb);
+ struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
+
+ return &scb->tls;
}
static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
diff --git a/include/net/udp.h b/include/net/udp.h
index f1c2a88c9005..b83a00330566 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -250,14 +250,14 @@ void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
-struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *off, int *err);
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
+ int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *err)
+ int *err)
{
int off = 0;
- return __skb_recv_udp(sk, flags, noblock, &off, err);
+ return __skb_recv_udp(sk, flags, &off, err);
}
int udp_v4_early_demux(struct sk_buff *skb);
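
Caller conversion sketch, not from this patch: the separate noblock argument is folded into flags as MSG_DONTWAIT.

#include <linux/socket.h>
#include <net/udp.h>

static struct sk_buff *example_recv_skb(struct sock *sk, int flags, int noblock,
					int *err)
{
	/* Old call: skb_recv_udp(sk, flags, noblock, err) */
	return skb_recv_udp(sk, flags | (noblock ? MSG_DONTWAIT : 0), err);
}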
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 69d883f7fb41..11ee4eaf84bd 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2497,15 +2497,7 @@ struct ib_device_ops {
struct ib_flow_attr *flow_attr,
struct ib_udata *udata);
int (*destroy_flow)(struct ib_flow *flow_id);
- struct ib_flow_action *(*create_flow_action_esp)(
- struct ib_device *device,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
int (*destroy_flow_action)(struct ib_flow_action *action);
- int (*modify_flow_action_esp)(
- struct ib_flow_action *action,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
int state);
int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 9b4e6c78d0f4..8d8d46778f7e 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -105,6 +105,11 @@
#define REG_RESERVED_ADDR 0xffffffff
#define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR)
+#define for_each_stat(ocelot, stat) \
+ for ((stat) = (ocelot)->stats_layout; \
+ ((stat)->name[0] != '\0'); \
+ (stat)++)
+
enum ocelot_target {
ANA = 1,
QS,
@@ -538,6 +543,8 @@ struct ocelot_stat_layout {
char name[ETH_GSTRING_LEN];
};
+#define OCELOT_STAT_END { .name = "" }
+
struct ocelot_stats_region {
struct list_head node;
u32 offset;
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index f8e28e686c65..563e48617374 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -84,6 +84,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
__field(u8, reset_transient)
__field(u8, reset_reason)
__field(u8, csum_reqd)
+ __field(u8, infinite_map)
),
TP_fast_assign(
@@ -102,9 +103,10 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
__entry->reset_transient = mpext->reset_transient;
__entry->reset_reason = mpext->reset_reason;
__entry->csum_reqd = mpext->csum_reqd;
+ __entry->infinite_map = mpext->infinite_map;
),
- TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u",
+ TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u infinite_map=%u",
__entry->data_ack, __entry->data_seq,
__entry->subflow_seq, __entry->data_len,
__entry->csum, __entry->use_map,
@@ -112,7 +114,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
__entry->use_ack, __entry->ack64,
__entry->mpc_map, __entry->frozen,
__entry->reset_transient, __entry->reset_reason,
- __entry->csum_reqd)
+ __entry->csum_reqd, __entry->infinite_map)
);
DEFINE_EVENT(mptcp_dump_mpext, mptcp_sendmsg_frag,
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index e1670e1e4934..a477bf907498 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -37,6 +37,20 @@
EM(SKB_DROP_REASON_TCP_OLD_DATA, TCP_OLD_DATA) \
EM(SKB_DROP_REASON_TCP_OVERWINDOW, TCP_OVERWINDOW) \
EM(SKB_DROP_REASON_TCP_OFOMERGE, TCP_OFOMERGE) \
+ EM(SKB_DROP_REASON_TCP_OFO_DROP, TCP_OFO_DROP) \
+ EM(SKB_DROP_REASON_TCP_RFC7323_PAWS, TCP_RFC7323_PAWS) \
+ EM(SKB_DROP_REASON_TCP_INVALID_SEQUENCE, \
+ TCP_INVALID_SEQUENCE) \
+ EM(SKB_DROP_REASON_TCP_RESET, TCP_RESET) \
+ EM(SKB_DROP_REASON_TCP_INVALID_SYN, TCP_INVALID_SYN) \
+ EM(SKB_DROP_REASON_TCP_CLOSE, TCP_CLOSE) \
+ EM(SKB_DROP_REASON_TCP_FASTOPEN, TCP_FASTOPEN) \
+ EM(SKB_DROP_REASON_TCP_OLD_ACK, TCP_OLD_ACK) \
+ EM(SKB_DROP_REASON_TCP_TOO_OLD_ACK, TCP_TOO_OLD_ACK) \
+ EM(SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, \
+ TCP_ACK_UNSENT_DATA) \
+ EM(SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE, \
+ TCP_OFO_QUEUE_PRUNE) \
EM(SKB_DROP_REASON_IP_OUTNOROUTES, IP_OUTNOROUTES) \
EM(SKB_DROP_REASON_BPF_CGROUP_EGRESS, \
BPF_CGROUP_EGRESS) \
@@ -50,7 +64,7 @@
EM(SKB_DROP_REASON_CPU_BACKLOG, CPU_BACKLOG) \
EM(SKB_DROP_REASON_XDP, XDP) \
EM(SKB_DROP_REASON_TC_INGRESS, TC_INGRESS) \
- EM(SKB_DROP_REASON_PTYPE_ABSENT, PTYPE_ABSENT) \
+ EM(SKB_DROP_REASON_UNHANDLED_PROTO, UNHANDLED_PROTO) \
EM(SKB_DROP_REASON_SKB_CSUM, SKB_CSUM) \
EM(SKB_DROP_REASON_SKB_GSO_SEG, SKB_GSO_SEG) \
EM(SKB_DROP_REASON_SKB_UCOPY_FAULT, SKB_UCOPY_FAULT) \
@@ -61,6 +75,11 @@
EM(SKB_DROP_REASON_HDR_TRUNC, HDR_TRUNC) \
EM(SKB_DROP_REASON_TAP_FILTER, TAP_FILTER) \
EM(SKB_DROP_REASON_TAP_TXFILTER, TAP_TXFILTER) \
+ EM(SKB_DROP_REASON_ICMP_CSUM, ICMP_CSUM) \
+ EM(SKB_DROP_REASON_INVALID_PROTO, INVALID_PROTO) \
+ EM(SKB_DROP_REASON_IP_INADDRERRORS, IP_INADDRERRORS) \
+ EM(SKB_DROP_REASON_IP_INNOROUTES, IP_INNOROUTES) \
+ EM(SKB_DROP_REASON_PKT_TOO_BIG, PKT_TOO_BIG) \
EMe(SKB_DROP_REASON_MAX, MAX)
#undef EM
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 521059d8dc0a..901b440238d5 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -279,7 +279,7 @@ TRACE_EVENT(tcp_probe,
__entry->data_len = skb->len - __tcp_hdrlen(th);
__entry->snd_nxt = tp->snd_nxt;
__entry->snd_una = tp->snd_una;
- __entry->snd_cwnd = tp->snd_cwnd;
+ __entry->snd_cwnd = tcp_snd_cwnd(tp);
__entry->snd_wnd = tp->snd_wnd;
__entry->rcv_wnd = tp->rcv_wnd;
__entry->ssthresh = tcp_current_ssthresh(sk);
@@ -371,6 +371,51 @@ DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
TP_ARGS(skb)
);
+TRACE_EVENT(tcp_cong_state_set,
+
+ TP_PROTO(struct sock *sk, const u8 ca_state),
+
+ TP_ARGS(sk, ca_state),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ __field(__u8, cong_state)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->cong_state = ca_state;
+ ),
+
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->cong_state)
+);
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 467ca2f28760..638230899e98 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -130,6 +130,8 @@
#define SO_TXREHASH 74
+#define SO_RCVMARK 75
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
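
Userspace sketch, not part of the patch: with SO_RCVMARK enabled, the mark is assumed to arrive as a SOL_SOCKET/SO_MARK control message on each received datagram.

#include <string.h>
#include <sys/socket.h>

#ifndef SO_MARK
#define SO_MARK 36
#endif
#ifndef SO_RCVMARK
#define SO_RCVMARK 75	/* value added by this patch; older headers lack it */
#endif

static unsigned int example_recv_with_mark(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	unsigned int mark = 0;
	int one = 1;

	setsockopt(fd, SOL_SOCKET, SO_RCVMARK, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) < 0)
		return 0;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SO_MARK)
			memcpy(&mark, CMSG_DATA(cmsg), sizeof(mark));

	return mark;
}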
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
deleted file mode 100644
index 5135027b93c1..000000000000
--- a/include/uapi/linux/atm_zatm.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* atm_zatm.h - Driver-specific declarations of the ZATM driver (for use by
- driver-specific utilities) */
-
-/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#ifndef LINUX_ATM_ZATM_H
-#define LINUX_ATM_ZATM_H
-
-/*
- * Note: non-kernel programs including this file must also include
- * sys/types.h for struct timeval
- */
-
-#include <linux/atmapi.h>
-#include <linux/atmioc.h>
-
-#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
- /* get pool statistics */
-#define ZATM_GETPOOLZ _IOW('a',ATMIOC_SARPRV+2,struct atmif_sioc)
- /* get statistics and zero */
-#define ZATM_SETPOOL _IOW('a',ATMIOC_SARPRV+3,struct atmif_sioc)
- /* set pool parameters */
-
-struct zatm_pool_info {
- int ref_count; /* free buffer pool usage counters */
- int low_water,high_water; /* refill parameters */
- int rqa_count,rqu_count; /* queue condition counters */
- int offset,next_off; /* alignment optimizations: offset */
- int next_cnt,next_thres; /* repetition counter and threshold */
-};
-
-struct zatm_pool_req {
- int pool_num; /* pool number */
- struct zatm_pool_info info; /* actual information */
-};
-
-#define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */
-#define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */
-#define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */
-#define ZATM_LAST_POOL ZATM_AAL5_POOL_BASE+10 /* max. 64 kB */
-
-#define ZATM_TIMER_HISTORY_SIZE 16 /* number of timer adjustments to
- record; must be 2^n */
-
-#endif
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index d14b10b85e51..444fe6f1cf35 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5143,6 +5143,17 @@ union bpf_attr {
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ * The old value of kptr (which can be NULL). The returned pointer,
+ * if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5339,6 +5350,7 @@ union bpf_attr {
FN(copy_from_user_task), \
FN(skb_set_tstamp), \
FN(ima_file_hash), \
+ FN(kptr_xchg), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
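
BPF-program sketch, not part of the patch: the __kptr_ref type tag, the map boilerplate and the bpf_kfunc_call_test_{acquire,release} kfuncs are assumptions borrowed from the selftests accompanying this series; only bpf_kptr_xchg() itself is defined here.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Mirrors the selftest definition of the kptr reference tag. */
#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))

struct map_value {
	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} array_map SEC(".maps");

extern struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;

SEC("tc")
int example_kptr_xchg(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *old, *new;
	unsigned long sp = 0;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	new = bpf_kfunc_call_test_acquire(&sp);
	if (!new)
		return 0;

	/* Swap the new reference in; the old one (if any) is now ours to release. */
	old = bpf_kptr_xchg(&v->ref_ptr, new);
	if (old)
		bpf_kfunc_call_test_release(old);

	return 0;
}

char _license[] SEC("license") = "GPL";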
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index b0d8fea1951d..a9162a6c0284 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -33,8 +33,8 @@ struct btf_type {
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
- * bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-30: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
* bit 31: kind_flag, currently used by
* struct, union and fwd
*/
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index b897b80770f6..fb8c3864457f 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -131,6 +131,13 @@ enum devlink_command {
DEVLINK_CMD_RATE_NEW,
DEVLINK_CMD_RATE_DEL,
+ DEVLINK_CMD_LINECARD_GET, /* can dump */
+ DEVLINK_CMD_LINECARD_SET,
+ DEVLINK_CMD_LINECARD_NEW,
+ DEVLINK_CMD_LINECARD_DEL,
+
+ DEVLINK_CMD_LINECARD_INFO_GET, /* can dump */
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -338,6 +345,19 @@ enum devlink_reload_limit {
#define DEVLINK_RELOAD_LIMITS_VALID_MASK (_BITUL(__DEVLINK_RELOAD_LIMIT_MAX) - 1)
+enum devlink_linecard_state {
+ DEVLINK_LINECARD_STATE_UNSPEC,
+ DEVLINK_LINECARD_STATE_UNPROVISIONED,
+ DEVLINK_LINECARD_STATE_UNPROVISIONING,
+ DEVLINK_LINECARD_STATE_PROVISIONING,
+ DEVLINK_LINECARD_STATE_PROVISIONING_FAILED,
+ DEVLINK_LINECARD_STATE_PROVISIONED,
+ DEVLINK_LINECARD_STATE_ACTIVE,
+
+ __DEVLINK_LINECARD_STATE_MAX,
+ DEVLINK_LINECARD_STATE_MAX = __DEVLINK_LINECARD_STATE_MAX - 1
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -553,6 +573,14 @@ enum devlink_attr {
DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */
+ DEVLINK_ATTR_LINECARD_INDEX, /* u32 */
+ DEVLINK_ATTR_LINECARD_STATE, /* u8 */
+ DEVLINK_ATTR_LINECARD_TYPE, /* string */
+ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */
+ DEVLINK_ATTR_LINECARD_DEVICE_LIST, /* nested */
+ DEVLINK_ATTR_LINECARD_DEVICE, /* nested */
+ DEVLINK_ATTR_LINECARD_DEVICE_INDEX, /* u32 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 7bc4b8def12c..e0f0ee9bc89e 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1691,6 +1691,7 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
+ ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 979850221b8d..d2fb4f7be61b 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -338,6 +338,7 @@ enum {
ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */
ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
+ ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index cc284c048e69..d1e600816b82 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -211,6 +211,9 @@ struct rtnl_link_stats {
* @rx_nohandler: Number of packets received on the interface
* but dropped by the networking stack because the device is
* not designated to receive packets (e.g. backup link in a bond).
+ *
+ * @rx_otherhost_dropped: Number of packets dropped due to mismatch
+ * in destination MAC address.
*/
struct rtnl_link_stats64 {
__u64 rx_packets;
@@ -243,6 +246,8 @@ struct rtnl_link_stats64 {
__u64 rx_compressed;
__u64 tx_compressed;
__u64 rx_nohandler;
+
+ __u64 rx_otherhost_dropped;
};
/* Subset of link stats useful for in-HW collection. Meaning of the fields is as
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index d4178dace0bf..549ddeaf788b 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -194,6 +194,7 @@ enum {
DEVCONF_IOAM6_ID,
DEVCONF_IOAM6_ID_WIDE,
DEVCONF_NDISC_EVICT_NOCARRIER,
+ DEVCONF_ACCEPT_UNSOLICITED_NA,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index c54e6eae5366..75b7257a51e1 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -67,6 +67,19 @@
#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+#define MDIO_B10L_PMA_CTRL 2294 /* 10BASE-T1L PMA control */
+#define MDIO_PMA_10T1L_STAT 2295 /* 10BASE-T1L PMA status */
+#define MDIO_PCS_10T1L_CTRL 2278 /* 10BASE-T1L PCS control */
+#define MDIO_PMA_PMD_BT1 18 /* BASE-T1 PMA/PMD extended ability */
+#define MDIO_AN_T1_CTRL 512 /* BASE-T1 AN control */
+#define MDIO_AN_T1_STAT 513 /* BASE-T1 AN status */
+#define MDIO_AN_T1_ADV_L 514 /* BASE-T1 AN advertisement register [15:0] */
+#define MDIO_AN_T1_ADV_M 515 /* BASE-T1 AN advertisement register [31:16] */
+#define MDIO_AN_T1_ADV_H 516 /* BASE-T1 AN advertisement register [47:32] */
+#define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */
+#define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */
+#define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */
/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -159,6 +172,7 @@
#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */
#define MDIO_PMA_CTRL2_2_5GBT 0x0030 /* 2.5GBaseT type */
#define MDIO_PMA_CTRL2_5GBT 0x0031 /* 5GBaseT type */
+#define MDIO_PMA_CTRL2_BASET1 0x003D /* BASE-T1 type */
#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */
#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */
#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */
@@ -212,6 +226,7 @@
#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */
#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */
#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */
+#define MDIO_PMA_EXTABLE_BT1 0x0800 /* BASE-T1 ability */
#define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */
/* PHY XGXS lane state register. */
@@ -268,6 +283,66 @@
#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
+/* 10BASE-T1L PMA control */
+#define MDIO_PMA_10T1L_CTRL_LB_EN 0x0001 /* Enable loopback mode */
+#define MDIO_PMA_10T1L_CTRL_EEE_EN 0x0400 /* Enable EEE mode */
+#define MDIO_PMA_10T1L_CTRL_LOW_POWER 0x0800 /* Low-power mode */
+#define MDIO_PMA_10T1L_CTRL_2V4_EN 0x1000 /* Enable 2.4 Vpp operating mode */
+#define MDIO_PMA_10T1L_CTRL_TX_DIS 0x4000 /* Transmit disable */
+#define MDIO_PMA_10T1L_CTRL_PMA_RST 0x8000 /* PMA reset */
+
+/* 10BASE-T1L PMA status register. */
+#define MDIO_PMA_10T1L_STAT_LINK 0x0001 /* PMA receive link up */
+#define MDIO_PMA_10T1L_STAT_FAULT 0x0002 /* Fault condition detected */
+#define MDIO_PMA_10T1L_STAT_POLARITY 0x0004 /* Receive polarity is reversed */
+#define MDIO_PMA_10T1L_STAT_RECV_FAULT 0x0200 /* Able to detect fault on receive path */
+#define MDIO_PMA_10T1L_STAT_EEE 0x0400 /* PHY has EEE ability */
+#define MDIO_PMA_10T1L_STAT_LOW_POWER 0x0800 /* PMA has low-power ability */
+#define MDIO_PMA_10T1L_STAT_2V4_ABLE 0x1000 /* PHY has 2.4 Vpp operating mode ability */
+#define MDIO_PMA_10T1L_STAT_LB_ABLE 0x2000 /* PHY has loopback ability */
+
+/* 10BASE-T1L PCS control register. */
+#define MDIO_PCS_10T1L_CTRL_LB 0x4000 /* Enable PCS level loopback mode */
+#define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */
+
+/* BASE-T1 PMA/PMD extended ability register. */
+#define MDIO_PMA_PMD_BT1_B10L_ABLE 0x0004 /* 10BASE-T1L Ability */
+
+/* BASE-T1 auto-negotiation advertisement register [15:0] */
+#define MDIO_AN_T1_ADV_L_PAUSE_CAP ADVERTISE_PAUSE_CAP
+#define MDIO_AN_T1_ADV_L_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
+#define MDIO_AN_T1_ADV_L_FORCE_MS 0x1000 /* Force Master/slave Configuration */
+#define MDIO_AN_T1_ADV_L_REMOTE_FAULT ADVERTISE_RFAULT
+#define MDIO_AN_T1_ADV_L_ACK ADVERTISE_LPACK
+#define MDIO_AN_T1_ADV_L_NEXT_PAGE_REQ ADVERTISE_NPAGE
+
+/* BASE-T1 auto-negotiation advertisement register [31:16] */
+#define MDIO_AN_T1_ADV_M_B10L 0x4000 /* device is compatible with 10BASE-T1L */
+#define MDIO_AN_T1_ADV_M_MST 0x0010 /* advertise master preference */
+
+/* BASE-T1 auto-negotiation advertisement register [47:32] */
+#define MDIO_AN_T1_ADV_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level Transmit Request */
+#define MDIO_AN_T1_ADV_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level Transmit Ability */
+
+/* BASE-T1 AN LP Base Page ability register [15:0] */
+#define MDIO_AN_T1_LP_L_PAUSE_CAP LPA_PAUSE_CAP
+#define MDIO_AN_T1_LP_L_PAUSE_ASYM LPA_PAUSE_ASYM
+#define MDIO_AN_T1_LP_L_FORCE_MS 0x1000 /* LP Force Master/slave Configuration */
+#define MDIO_AN_T1_LP_L_REMOTE_FAULT LPA_RFAULT
+#define MDIO_AN_T1_LP_L_ACK LPA_LPACK
+#define MDIO_AN_T1_LP_L_NEXT_PAGE_REQ LPA_NPAGE
+
+/* BASE-T1 AN LP Base Page ability register [31:16] */
+#define MDIO_AN_T1_LP_M_MST 0x0010 /* LP master preference */
+#define MDIO_AN_T1_LP_M_B10L 0x4000 /* LP is compatible with 10BASE-T1L */
+
+/* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */
+#define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */
+
+/* BASE-T1 PMA/PMD control register */
+#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */
+
/* EEE Supported/Advertisement/LP Advertisement registers.
*
* EEE capability Register (3.20), Advertisement (7.60) and
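
The new 10BASE-T1L and BASE-T1 definitions above are ordinary Clause 45 register addresses, so a PHY driver can reach them through the existing MMD helpers. A minimal sketch, with a hypothetical driver function name (phy_read_mmd() and MDIO_MMD_PMAPMD are the pre-existing helper and constant):

#include <linux/mdio.h>
#include <linux/phy.h>

/* Hypothetical helper: report the 10BASE-T1L PMA receive link state. */
static int example_t1l_link_up(struct phy_device *phydev)
{
	int val;

	/* The status register lives in the PMA/PMD MMD (device address 1). */
	val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT);
	if (val < 0)
		return val;

	return !!(val & MDIO_PMA_10T1L_STAT_LINK);
}

A driver would typically also check MDIO_PMA_EXTABLE_BT1 and MDIO_PMA_PMD_BT1_B10L_ABLE before selecting the 10BASE-T1L type via MDIO_PMA_CTRL2_BASET1.
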
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 9690efedb5fa..921963589904 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -55,6 +55,9 @@ enum {
MPTCP_PM_ATTR_ADDR, /* nested address */
MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */
MPTCP_PM_ATTR_SUBFLOWS, /* u32 */
+ MPTCP_PM_ATTR_TOKEN, /* u32 */
+ MPTCP_PM_ATTR_LOC_ID, /* u8 */
+ MPTCP_PM_ATTR_ADDR_REMOTE, /* nested address */
__MPTCP_PM_ATTR_MAX
};
@@ -93,6 +96,10 @@ enum {
MPTCP_PM_CMD_SET_LIMITS,
MPTCP_PM_CMD_GET_LIMITS,
MPTCP_PM_CMD_SET_FLAGS,
+ MPTCP_PM_CMD_ANNOUNCE,
+ MPTCP_PM_CMD_REMOVE,
+ MPTCP_PM_CMD_SUBFLOW_CREATE,
+ MPTCP_PM_CMD_SUBFLOW_DESTROY,
__MPTCP_PM_CMD_AFTER_LAST
};
@@ -188,6 +195,7 @@ enum mptcp_event_attr {
MPTCP_ATTR_IF_IDX, /* s32 */
MPTCP_ATTR_RESET_REASON,/* u32 */
MPTCP_ATTR_RESET_FLAGS, /* u32 */
+ MPTCP_ATTR_SERVER_SIDE, /* u8 */
__MPTCP_ATTR_AFTER_LAST
};
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index db05fb55055e..39c565e460c7 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -32,6 +32,8 @@ enum {
NDA_NH_ID,
NDA_FDB_EXT_ATTRS,
NDA_FLAGS_EXT,
+ NDA_NDM_STATE_MASK,
+ NDA_NDM_FLAGS_MASK,
__NDA_MAX
};
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 4c0cde075c27..855dffb4c1c3 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -72,6 +72,7 @@ struct nlmsghdr {
/* Modifiers to DELETE request */
#define NLM_F_NONREC 0x100 /* Do not delete recursively */
+#define NLM_F_BULK 0x200 /* Delete multiple objects */
/* Flags for ACK message */
#define NLM_F_CAPPED 0x100 /* request was capped */
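
NLM_F_BULK is a request modifier, so from user space it is simply OR-ed into nlmsg_flags on a delete request. A minimal sketch, assuming a bridge FDB flush expressed as RTM_DELNEIGH with the new flag (the fixed ifindex and the lack of ACK parsing are simplifications):

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nlh.nlmsg_type = RTM_DELNEIGH;
	/* NLM_F_BULK turns the single delete into a filtered bulk delete. */
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_BULK;
	req.ndm.ndm_family = AF_BRIDGE;
	req.ndm.ndm_ifindex = 2;	/* placeholder ifindex */

	sendto(fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	close(fd);
	return 0;
}
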
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 404f97fb239c..9a2ee1e39fad 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -587,6 +587,8 @@ enum {
TCA_FLOWER_KEY_HASH, /* u32 */
TCA_FLOWER_KEY_HASH_MASK, /* u32 */
+ TCA_FLOWER_KEY_NUM_OF_VLANS, /* u8 */
+
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 800e93377218..6cb6101208d0 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -29,6 +29,7 @@
#define SKBEDIT_F_PTYPE 0x8
#define SKBEDIT_F_MASK 0x10
#define SKBEDIT_F_INHERITDSFIELD 0x20
+#define SKBEDIT_F_TXQ_SKBHASH 0x40
struct tc_skbedit {
tc_gen;
@@ -45,6 +46,7 @@ enum {
TCA_SKBEDIT_PTYPE,
TCA_SKBEDIT_MASK,
TCA_SKBEDIT_FLAGS,
+ TCA_SKBEDIT_QUEUE_MAPPING_MAX,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 4dfc05651c98..c00adf2fe868 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -43,10 +43,6 @@
#include <linux/tipc.h>
#include <asm/byteorder.h>
-#ifndef __KERNEL__
-#include <arpa/inet.h> /* for ntohs etc. */
-#endif
-
/*
* Configuration
*
@@ -269,33 +265,33 @@ static inline int TLV_OK(const void *tlv, __u16 space)
*/
return (space >= TLV_SPACE(0)) &&
- (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+ (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_len) <= space);
}
static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
{
return TLV_OK(tlv, space) &&
- (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+ (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
}
static inline int TLV_GET_LEN(struct tlv_desc *tlv)
{
- return ntohs(tlv->tlv_len);
+ return __be16_to_cpu(tlv->tlv_len);
}
static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len)
{
- tlv->tlv_len = htons(len);
+ tlv->tlv_len = __cpu_to_be16(len);
}
static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv, __u16 type)
{
- return (ntohs(tlv->tlv_type) == type);
+ return (__be16_to_cpu(tlv->tlv_type) == type);
}
static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type)
{
- tlv->tlv_type = htons(type);
+ tlv->tlv_type = __cpu_to_be16(type);
}
static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
@@ -305,8 +301,8 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
tlv_len = TLV_LENGTH(len);
tlv_ptr = (struct tlv_desc *)tlv;
- tlv_ptr->tlv_type = htons(type);
- tlv_ptr->tlv_len = htons(tlv_len);
+ tlv_ptr->tlv_type = __cpu_to_be16(type);
+ tlv_ptr->tlv_len = __cpu_to_be16(tlv_len);
if (len && data) {
memcpy(TLV_DATA(tlv_ptr), data, len);
memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
@@ -348,7 +344,7 @@ static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
{
- __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+ __u16 tlv_space = TLV_ALIGN(__be16_to_cpu(list->tlv_ptr->tlv_len));
list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
list->tlv_space -= tlv_space;
@@ -404,9 +400,9 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
msg_len = TCM_LENGTH(data_len);
tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
- tcm_hdr->tcm_len = htonl(msg_len);
- tcm_hdr->tcm_type = htons(cmd);
- tcm_hdr->tcm_flags = htons(flags);
+ tcm_hdr->tcm_len = __cpu_to_be32(msg_len);
+ tcm_hdr->tcm_type = __cpu_to_be16(cmd);
+ tcm_hdr->tcm_flags = __cpu_to_be16(flags);
if (data_len && data) {
memcpy(TCM_DATA(msg), data, data_len);
memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
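
With the helpers switched from ntohs()/htons() to the __be16_to_cpu()/__cpu_to_be16() family, the header no longer needs <arpa/inet.h>, yet user space can still use the inline TLV builders directly. A minimal sketch, assuming the TIPC_TLV_UNSIGNED type constant from the same header:

#include <linux/tipc_config.h>
#include <stdio.h>

int main(void)
{
	char buf[TLV_SPACE(sizeof(__u32))];
	__u32 value = 42;

	/* Builds the TLV header in network byte order without <arpa/inet.h>. */
	TLV_SET(buf, TIPC_TLV_UNSIGNED, &value, sizeof(value));

	printf("tlv len=%d ok=%d\n",
	       TLV_GET_LEN((struct tlv_desc *)buf),
	       TLV_OK(buf, sizeof(buf)));
	return 0;
}
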
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 7f145aefbff8..b3bf31fd9458 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,6 +11,7 @@
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
+#include <linux/btf_ids.h>
#include "map_in_map.h"
@@ -287,10 +288,12 @@ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key
return 0;
}
-static void check_and_free_timer_in_array(struct bpf_array *arr, void *val)
+static void check_and_free_fields(struct bpf_array *arr, void *val)
{
- if (unlikely(map_value_has_timer(&arr->map)))
+ if (map_value_has_timer(&arr->map))
bpf_timer_cancel_and_free(val + arr->map.timer_off);
+ if (map_value_has_kptrs(&arr->map))
+ bpf_map_free_kptrs(&arr->map, val);
}
/* Called from syscall or from eBPF program */
@@ -327,7 +330,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
copy_map_value_locked(map, val, value, false);
else
copy_map_value(map, val, value);
- check_and_free_timer_in_array(array, val);
+ check_and_free_fields(array, val);
}
return 0;
}
@@ -386,7 +389,8 @@ static void array_map_free_timers(struct bpf_map *map)
struct bpf_array *array = container_of(map, struct bpf_array, map);
int i;
- if (likely(!map_value_has_timer(map)))
+ /* We don't reset or free kptr on uref dropping to zero. */
+ if (!map_value_has_timer(map))
return;
for (i = 0; i < array->map.max_entries; i++)
@@ -398,6 +402,13 @@ static void array_map_free_timers(struct bpf_map *map)
static void array_map_free(struct bpf_map *map)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
+ int i;
+
+ if (map_value_has_kptrs(map)) {
+ for (i = 0; i < array->map.max_entries; i++)
+ bpf_map_free_kptrs(map, array->value + array->elem_size * i);
+ bpf_map_free_kptr_off_tab(map);
+ }
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
bpf_array_free_percpu(array);
@@ -680,7 +691,7 @@ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_
return num_elems;
}
-static int array_map_btf_id;
+BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
.map_meta_equal = array_map_meta_equal,
.map_alloc_check = array_map_alloc_check,
@@ -701,12 +712,10 @@ const struct bpf_map_ops array_map_ops = {
.map_update_batch = generic_map_update_batch,
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_array_elem,
- .map_btf_name = "bpf_array",
- .map_btf_id = &array_map_btf_id,
+ .map_btf_id = &array_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
};
-static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = array_map_alloc_check,
@@ -722,8 +731,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_update_batch = generic_map_update_batch,
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_array_elem,
- .map_btf_name = "bpf_array",
- .map_btf_id = &percpu_array_map_btf_id,
+ .map_btf_id = &array_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
};
@@ -1102,7 +1110,6 @@ static void prog_array_map_free(struct bpf_map *map)
* Thus, prog_array_map cannot be used as an inner_map
* and map_meta_equal is not implemented.
*/
-static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = prog_array_map_alloc,
@@ -1118,8 +1125,7 @@ const struct bpf_map_ops prog_array_map_ops = {
.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
.map_release_uref = prog_array_map_clear,
.map_seq_show_elem = prog_array_map_seq_show_elem,
- .map_btf_name = "bpf_array",
- .map_btf_id = &prog_array_map_btf_id,
+ .map_btf_id = &array_map_btf_ids[0],
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
@@ -1208,7 +1214,6 @@ static void perf_event_fd_array_map_free(struct bpf_map *map)
fd_array_map_free(map);
}
-static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = fd_array_map_alloc_check,
@@ -1221,8 +1226,7 @@ const struct bpf_map_ops perf_event_array_map_ops = {
.map_fd_put_ptr = perf_event_fd_array_put_ptr,
.map_release = perf_event_fd_array_release,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_array",
- .map_btf_id = &perf_event_array_map_btf_id,
+ .map_btf_id = &array_map_btf_ids[0],
};
#ifdef CONFIG_CGROUPS
@@ -1245,7 +1249,6 @@ static void cgroup_fd_array_free(struct bpf_map *map)
fd_array_map_free(map);
}
-static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = fd_array_map_alloc_check,
@@ -1257,8 +1260,7 @@ const struct bpf_map_ops cgroup_array_map_ops = {
.map_fd_get_ptr = cgroup_fd_array_get_ptr,
.map_fd_put_ptr = cgroup_fd_array_put_ptr,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_array",
- .map_btf_id = &cgroup_array_map_btf_id,
+ .map_btf_id = &array_map_btf_ids[0],
};
#endif
@@ -1332,7 +1334,6 @@ static int array_of_map_gen_lookup(struct bpf_map *map,
return insn - insn_buf;
}
-static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_of_map_alloc,
@@ -1345,6 +1346,5 @@ const struct bpf_map_ops array_of_maps_map_ops = {
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = array_of_map_gen_lookup,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_array",
- .map_btf_id = &array_of_maps_map_btf_id,
+ .map_btf_id = &array_map_btf_ids[0],
};
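
All of the array-based map types now share one BTF ID resolved at build time instead of a per-file map_btf_name string looked up at runtime. A hypothetical sketch of the same convention applied to a made-up map type (the struct name and stub callbacks are assumptions; only the BTF ID wiring mirrors the real map types above):

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/err.h>

BTF_ID_LIST_SINGLE(example_map_btf_ids, struct, bpf_example_map)

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);	/* stub */
}

static void example_map_free(struct bpf_map *map)
{
}

const struct bpf_map_ops example_map_ops = {
	.map_alloc  = example_map_alloc,
	.map_free   = example_map_free,
	/* One shared BTF_ID entry replaces .map_btf_name plus a private int. */
	.map_btf_id = &example_map_btf_ids[0],
};
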
diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c
index b141a1346f72..b9ea539a5561 100644
--- a/kernel/bpf/bloom_filter.c
+++ b/kernel/bpf/bloom_filter.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/jhash.h>
#include <linux/random.h>
+#include <linux/btf_ids.h>
#define BLOOM_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
@@ -192,7 +193,7 @@ static int bloom_map_check_btf(const struct bpf_map *map,
return btf_type_is_void(key_type) ? 0 : -EINVAL;
}
-static int bpf_bloom_map_btf_id;
+BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
const struct bpf_map_ops bloom_filter_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = bloom_map_alloc,
@@ -205,6 +206,5 @@ const struct bpf_map_ops bloom_filter_map_ops = {
.map_update_elem = bloom_map_update_elem,
.map_delete_elem = bloom_map_delete_elem,
.map_check_btf = bloom_map_check_btf,
- .map_btf_name = "bpf_bloom_filter",
- .map_btf_id = &bpf_bloom_map_btf_id,
+ .map_btf_id = &bpf_bloom_map_btf_ids[0],
};
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 96be8d518885..5f7683b19199 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -90,7 +90,7 @@ void bpf_inode_storage_free(struct inode *inode)
*/
bpf_selem_unlink_map(selem);
free_inode_storage = bpf_selem_unlink_storage_nolock(
- local_storage, selem, false);
+ local_storage, selem, false, false);
}
raw_spin_unlock_bh(&local_storage->lock);
rcu_read_unlock();
@@ -149,7 +149,7 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
if (!sdata)
return -ENOENT;
- bpf_selem_unlink(SELEM(sdata));
+ bpf_selem_unlink(SELEM(sdata), true);
return 0;
}
@@ -245,7 +245,8 @@ static void inode_storage_map_free(struct bpf_map *map)
bpf_local_storage_map_free(smap, NULL);
}
-static int inode_storage_map_btf_id;
+BTF_ID_LIST_SINGLE(inode_storage_map_btf_ids, struct,
+ bpf_local_storage_map)
const struct bpf_map_ops inode_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check,
@@ -256,8 +257,7 @@ const struct bpf_map_ops inode_storage_map_ops = {
.map_update_elem = bpf_fd_inode_storage_update_elem,
.map_delete_elem = bpf_fd_inode_storage_delete_elem,
.map_check_btf = bpf_local_storage_map_check_btf,
- .map_btf_name = "bpf_local_storage_map",
- .map_btf_id = &inode_storage_map_btf_id,
+ .map_btf_id = &inode_storage_map_btf_ids[0],
.map_owner_storage_ptr = inode_storage_ptr,
};
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 110029ede71e..d5d96ceca105 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -330,35 +330,34 @@ static void cache_btf_id(struct bpf_iter_target_info *tinfo,
bool bpf_iter_prog_supported(struct bpf_prog *prog)
{
const char *attach_fname = prog->aux->attach_func_name;
+ struct bpf_iter_target_info *tinfo = NULL, *iter;
u32 prog_btf_id = prog->aux->attach_btf_id;
const char *prefix = BPF_ITER_FUNC_PREFIX;
- struct bpf_iter_target_info *tinfo;
int prefix_len = strlen(prefix);
- bool supported = false;
if (strncmp(attach_fname, prefix, prefix_len))
return false;
mutex_lock(&targets_mutex);
- list_for_each_entry(tinfo, &targets, list) {
- if (tinfo->btf_id && tinfo->btf_id == prog_btf_id) {
- supported = true;
+ list_for_each_entry(iter, &targets, list) {
+ if (iter->btf_id && iter->btf_id == prog_btf_id) {
+ tinfo = iter;
break;
}
- if (!strcmp(attach_fname + prefix_len, tinfo->reg_info->target)) {
- cache_btf_id(tinfo, prog);
- supported = true;
+ if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) {
+ cache_btf_id(iter, prog);
+ tinfo = iter;
break;
}
}
mutex_unlock(&targets_mutex);
- if (supported) {
+ if (tinfo) {
prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size;
prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info;
}
- return supported;
+ return tinfo != NULL;
}
const struct bpf_func_proto *
@@ -499,12 +498,11 @@ bool bpf_link_is_iter(struct bpf_link *link)
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
struct bpf_prog *prog)
{
+ struct bpf_iter_target_info *tinfo = NULL, *iter;
struct bpf_link_primer link_primer;
- struct bpf_iter_target_info *tinfo;
union bpf_iter_link_info linfo;
struct bpf_iter_link *link;
u32 prog_btf_id, linfo_len;
- bool existed = false;
bpfptr_t ulinfo;
int err;
@@ -530,14 +528,14 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
prog_btf_id = prog->aux->attach_btf_id;
mutex_lock(&targets_mutex);
- list_for_each_entry(tinfo, &targets, list) {
- if (tinfo->btf_id == prog_btf_id) {
- existed = true;
+ list_for_each_entry(iter, &targets, list) {
+ if (iter->btf_id == prog_btf_id) {
+ tinfo = iter;
break;
}
}
mutex_unlock(&targets_mutex);
- if (!existed)
+ if (!tinfo)
return -ENOENT;
link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
@@ -547,7 +545,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog);
link->tinfo = tinfo;
- err = bpf_link_prime(&link->link, &link_primer);
+ err = bpf_link_prime(&link->link, &link_primer);
if (err) {
kfree(link);
return err;
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 01aa2b51ec4d..8ce40fd869f6 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -106,7 +106,7 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
*/
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem,
- bool uncharge_mem)
+ bool uncharge_mem, bool use_trace_rcu)
{
struct bpf_local_storage_map *smap;
bool free_local_storage;
@@ -150,11 +150,16 @@ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
SDATA(selem))
RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
- call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
+ if (use_trace_rcu)
+ call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
+ else
+ kfree_rcu(selem, rcu);
+
return free_local_storage;
}
-static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
+static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
+ bool use_trace_rcu)
{
struct bpf_local_storage *local_storage;
bool free_local_storage = false;
@@ -169,12 +174,16 @@ static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
raw_spin_lock_irqsave(&local_storage->lock, flags);
if (likely(selem_linked_to_storage(selem)))
free_local_storage = bpf_selem_unlink_storage_nolock(
- local_storage, selem, true);
+ local_storage, selem, true, use_trace_rcu);
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
- if (free_local_storage)
- call_rcu_tasks_trace(&local_storage->rcu,
+ if (free_local_storage) {
+ if (use_trace_rcu)
+ call_rcu_tasks_trace(&local_storage->rcu,
bpf_local_storage_free_rcu);
+ else
+ kfree_rcu(local_storage, rcu);
+ }
}
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
@@ -214,14 +223,14 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
raw_spin_unlock_irqrestore(&b->lock, flags);
}
-void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
{
/* Always unlink from map before unlinking from local_storage
* because selem will be freed after successfully unlinked from
* the local_storage.
*/
bpf_selem_unlink_map(selem);
- __bpf_selem_unlink_storage(selem);
+ __bpf_selem_unlink_storage(selem, use_trace_rcu);
}
struct bpf_local_storage_data *
@@ -466,7 +475,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
if (old_sdata) {
bpf_selem_unlink_map(SELEM(old_sdata));
bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
- false);
+ false, true);
}
unlock:
@@ -548,7 +557,7 @@ void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
migrate_disable();
__this_cpu_inc(*busy_counter);
}
- bpf_selem_unlink(selem);
+ bpf_selem_unlink(selem, false);
if (busy_counter) {
__this_cpu_dec(*busy_counter);
migrate_enable();
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 21069dbe9138..3a0103ad97bc 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
+#include <linux/btf_ids.h>
enum bpf_struct_ops_state {
BPF_STRUCT_OPS_STATE_INIT,
@@ -263,7 +264,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
/* No lock is needed. state and refcnt do not need
* to be updated together under atomic context.
*/
- uvalue = (struct bpf_struct_ops_value *)value;
+ uvalue = value;
memcpy(uvalue, st_map->uvalue, map->value_size);
uvalue->state = state;
refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));
@@ -353,7 +354,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (err)
return err;
- uvalue = (struct bpf_struct_ops_value *)value;
+ uvalue = value;
err = check_zero_holes(t, uvalue->data);
if (err)
return err;
@@ -612,7 +613,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
return map;
}
-static int bpf_struct_ops_map_btf_id;
+BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
.map_alloc_check = bpf_struct_ops_map_alloc_check,
.map_alloc = bpf_struct_ops_map_alloc,
@@ -622,8 +623,7 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
.map_delete_elem = bpf_struct_ops_map_delete_elem,
.map_update_elem = bpf_struct_ops_map_update_elem,
.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
- .map_btf_name = "bpf_struct_ops_map",
- .map_btf_id = &bpf_struct_ops_map_btf_id,
+ .map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};
/* "const void *" because some subsystem is
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index 6638a0ecc3d2..e9014dc62682 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -102,7 +102,7 @@ void bpf_task_storage_free(struct task_struct *task)
*/
bpf_selem_unlink_map(selem);
free_task_storage = bpf_selem_unlink_storage_nolock(
- local_storage, selem, false);
+ local_storage, selem, false, false);
}
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
bpf_task_storage_unlock();
@@ -192,7 +192,7 @@ static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
if (!sdata)
return -ENOENT;
- bpf_selem_unlink(SELEM(sdata));
+ bpf_selem_unlink(SELEM(sdata), true);
return 0;
}
@@ -307,7 +307,7 @@ static void task_storage_map_free(struct bpf_map *map)
bpf_local_storage_map_free(smap, &bpf_task_storage_busy);
}
-static int task_storage_map_btf_id;
+BTF_ID_LIST_SINGLE(task_storage_map_btf_ids, struct, bpf_local_storage_map)
const struct bpf_map_ops task_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check,
@@ -318,8 +318,7 @@ const struct bpf_map_ops task_storage_map_ops = {
.map_update_elem = bpf_pid_task_storage_update_elem,
.map_delete_elem = bpf_pid_task_storage_delete_elem,
.map_check_btf = bpf_local_storage_map_check_btf,
- .map_btf_name = "bpf_local_storage_map",
- .map_btf_id = &task_storage_map_btf_id,
+ .map_btf_id = &task_storage_map_btf_ids[0],
.map_owner_storage_ptr = task_storage_ptr,
};
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 0918a39279f6..2f0b0440131c 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -207,12 +207,18 @@ enum btf_kfunc_hook {
enum {
BTF_KFUNC_SET_MAX_CNT = 32,
+ BTF_DTOR_KFUNC_MAX_CNT = 256,
};
struct btf_kfunc_set_tab {
struct btf_id_set *sets[BTF_KFUNC_HOOK_MAX][BTF_KFUNC_TYPE_MAX];
};
+struct btf_id_dtor_kfunc_tab {
+ u32 cnt;
+ struct btf_id_dtor_kfunc dtors[];
+};
+
struct btf {
void *data;
struct btf_type **types;
@@ -228,6 +234,7 @@ struct btf {
u32 id;
struct rcu_head rcu;
struct btf_kfunc_set_tab *kfunc_set_tab;
+ struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
/* split BTF support */
struct btf *base_btf;
@@ -1616,8 +1623,19 @@ free_tab:
btf->kfunc_set_tab = NULL;
}
+static void btf_free_dtor_kfunc_tab(struct btf *btf)
+{
+ struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
+
+ if (!tab)
+ return;
+ kfree(tab);
+ btf->dtor_kfunc_tab = NULL;
+}
+
static void btf_free(struct btf *btf)
{
+ btf_free_dtor_kfunc_tab(btf);
btf_free_kfunc_set_tab(btf);
kvfree(btf->types);
kvfree(btf->resolved_sizes);
@@ -3163,24 +3181,86 @@ static void btf_struct_log(struct btf_verifier_env *env,
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}
+enum btf_field_type {
+ BTF_FIELD_SPIN_LOCK,
+ BTF_FIELD_TIMER,
+ BTF_FIELD_KPTR,
+};
+
+enum {
+ BTF_FIELD_IGNORE = 0,
+ BTF_FIELD_FOUND = 1,
+};
+
+struct btf_field_info {
+ u32 type_id;
+ u32 off;
+ enum bpf_kptr_type type;
+};
+
+static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
+ u32 off, int sz, struct btf_field_info *info)
+{
+ if (!__btf_type_is_struct(t))
+ return BTF_FIELD_IGNORE;
+ if (t->size != sz)
+ return BTF_FIELD_IGNORE;
+ info->off = off;
+ return BTF_FIELD_FOUND;
+}
+
+static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
+ u32 off, int sz, struct btf_field_info *info)
+{
+ enum bpf_kptr_type type;
+ u32 res_id;
+
+ /* For PTR, sz is always == 8 */
+ if (!btf_type_is_ptr(t))
+ return BTF_FIELD_IGNORE;
+ t = btf_type_by_id(btf, t->type);
+
+ if (!btf_type_is_type_tag(t))
+ return BTF_FIELD_IGNORE;
+ /* Reject extra tags */
+ if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
+ return -EINVAL;
+ if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
+ type = BPF_KPTR_UNREF;
+ else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off)))
+ type = BPF_KPTR_REF;
+ else
+ return -EINVAL;
+
+ /* Get the base type */
+ t = btf_type_skip_modifiers(btf, t->type, &res_id);
+ /* Only pointer to struct is allowed */
+ if (!__btf_type_is_struct(t))
+ return -EINVAL;
+
+ info->type_id = res_id;
+ info->off = off;
+ info->type = type;
+ return BTF_FIELD_FOUND;
+}
+
static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t,
- const char *name, int sz, int align)
+ const char *name, int sz, int align,
+ enum btf_field_type field_type,
+ struct btf_field_info *info, int info_cnt)
{
const struct btf_member *member;
- u32 i, off = -ENOENT;
+ struct btf_field_info tmp;
+ int ret, idx = 0;
+ u32 i, off;
for_each_member(i, t, member) {
const struct btf_type *member_type = btf_type_by_id(btf,
member->type);
- if (!__btf_type_is_struct(member_type))
- continue;
- if (member_type->size != sz)
- continue;
- if (strcmp(__btf_name_by_offset(btf, member_type->name_off), name))
+
+ if (name && strcmp(__btf_name_by_offset(btf, member_type->name_off), name))
continue;
- if (off != -ENOENT)
- /* only one such field is allowed */
- return -E2BIG;
+
off = __btf_member_bit_offset(t, member);
if (off % 8)
/* valid C code cannot generate such BTF */
@@ -3188,46 +3268,115 @@ static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t
off /= 8;
if (off % align)
return -EINVAL;
+
+ switch (field_type) {
+ case BTF_FIELD_SPIN_LOCK:
+ case BTF_FIELD_TIMER:
+ ret = btf_find_struct(btf, member_type, off, sz,
+ idx < info_cnt ? &info[idx] : &tmp);
+ if (ret < 0)
+ return ret;
+ break;
+ case BTF_FIELD_KPTR:
+ ret = btf_find_kptr(btf, member_type, off, sz,
+ idx < info_cnt ? &info[idx] : &tmp);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ if (ret == BTF_FIELD_IGNORE)
+ continue;
+ if (idx >= info_cnt)
+ return -E2BIG;
+ ++idx;
}
- return off;
+ return idx;
}
static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
- const char *name, int sz, int align)
+ const char *name, int sz, int align,
+ enum btf_field_type field_type,
+ struct btf_field_info *info, int info_cnt)
{
const struct btf_var_secinfo *vsi;
- u32 i, off = -ENOENT;
+ struct btf_field_info tmp;
+ int ret, idx = 0;
+ u32 i, off;
for_each_vsi(i, t, vsi) {
const struct btf_type *var = btf_type_by_id(btf, vsi->type);
const struct btf_type *var_type = btf_type_by_id(btf, var->type);
- if (!__btf_type_is_struct(var_type))
- continue;
- if (var_type->size != sz)
+ off = vsi->offset;
+
+ if (name && strcmp(__btf_name_by_offset(btf, var_type->name_off), name))
continue;
if (vsi->size != sz)
continue;
- if (strcmp(__btf_name_by_offset(btf, var_type->name_off), name))
- continue;
- if (off != -ENOENT)
- /* only one such field is allowed */
- return -E2BIG;
- off = vsi->offset;
if (off % align)
return -EINVAL;
+
+ switch (field_type) {
+ case BTF_FIELD_SPIN_LOCK:
+ case BTF_FIELD_TIMER:
+ ret = btf_find_struct(btf, var_type, off, sz,
+ idx < info_cnt ? &info[idx] : &tmp);
+ if (ret < 0)
+ return ret;
+ break;
+ case BTF_FIELD_KPTR:
+ ret = btf_find_kptr(btf, var_type, off, sz,
+ idx < info_cnt ? &info[idx] : &tmp);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ if (ret == BTF_FIELD_IGNORE)
+ continue;
+ if (idx >= info_cnt)
+ return -E2BIG;
+ ++idx;
}
- return off;
+ return idx;
}
static int btf_find_field(const struct btf *btf, const struct btf_type *t,
- const char *name, int sz, int align)
+ enum btf_field_type field_type,
+ struct btf_field_info *info, int info_cnt)
{
+ const char *name;
+ int sz, align;
+
+ switch (field_type) {
+ case BTF_FIELD_SPIN_LOCK:
+ name = "bpf_spin_lock";
+ sz = sizeof(struct bpf_spin_lock);
+ align = __alignof__(struct bpf_spin_lock);
+ break;
+ case BTF_FIELD_TIMER:
+ name = "bpf_timer";
+ sz = sizeof(struct bpf_timer);
+ align = __alignof__(struct bpf_timer);
+ break;
+ case BTF_FIELD_KPTR:
+ name = NULL;
+ sz = sizeof(u64);
+ align = 8;
+ break;
+ default:
+ return -EFAULT;
+ }
if (__btf_type_is_struct(t))
- return btf_find_struct_field(btf, t, name, sz, align);
+ return btf_find_struct_field(btf, t, name, sz, align, field_type, info, info_cnt);
else if (btf_type_is_datasec(t))
- return btf_find_datasec_var(btf, t, name, sz, align);
+ return btf_find_datasec_var(btf, t, name, sz, align, field_type, info, info_cnt);
return -EINVAL;
}
@@ -3237,16 +3386,130 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t,
*/
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
{
- return btf_find_field(btf, t, "bpf_spin_lock",
- sizeof(struct bpf_spin_lock),
- __alignof__(struct bpf_spin_lock));
+ struct btf_field_info info;
+ int ret;
+
+ ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, &info, 1);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENOENT;
+ return info.off;
}
int btf_find_timer(const struct btf *btf, const struct btf_type *t)
{
- return btf_find_field(btf, t, "bpf_timer",
- sizeof(struct bpf_timer),
- __alignof__(struct bpf_timer));
+ struct btf_field_info info;
+ int ret;
+
+ ret = btf_find_field(btf, t, BTF_FIELD_TIMER, &info, 1);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENOENT;
+ return info.off;
+}
+
+struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf,
+ const struct btf_type *t)
+{
+ struct btf_field_info info_arr[BPF_MAP_VALUE_OFF_MAX];
+ struct bpf_map_value_off *tab;
+ struct btf *kernel_btf = NULL;
+ struct module *mod = NULL;
+ int ret, i, nr_off;
+
+ ret = btf_find_field(btf, t, BTF_FIELD_KPTR, info_arr, ARRAY_SIZE(info_arr));
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (!ret)
+ return NULL;
+
+ nr_off = ret;
+ tab = kzalloc(offsetof(struct bpf_map_value_off, off[nr_off]), GFP_KERNEL | __GFP_NOWARN);
+ if (!tab)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_off; i++) {
+ const struct btf_type *t;
+ s32 id;
+
+ /* Find type in map BTF, and use it to look up the matching type
+ * in vmlinux or module BTFs, by name and kind.
+ */
+ t = btf_type_by_id(btf, info_arr[i].type_id);
+ id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
+ &kernel_btf);
+ if (id < 0) {
+ ret = id;
+ goto end;
+ }
+
+ /* Find and stash the function pointer for the destruction function that
+ * needs to be eventually invoked from the map free path.
+ */
+ if (info_arr[i].type == BPF_KPTR_REF) {
+ const struct btf_type *dtor_func;
+ const char *dtor_func_name;
+ unsigned long addr;
+ s32 dtor_btf_id;
+
+ /* This call also serves as a whitelist of allowed objects that
+ * can be used as a referenced pointer and be stored in a map at
+ * the same time.
+ */
+ dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id);
+ if (dtor_btf_id < 0) {
+ ret = dtor_btf_id;
+ goto end_btf;
+ }
+
+ dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id);
+ if (!dtor_func) {
+ ret = -ENOENT;
+ goto end_btf;
+ }
+
+ if (btf_is_module(kernel_btf)) {
+ mod = btf_try_get_module(kernel_btf);
+ if (!mod) {
+ ret = -ENXIO;
+ goto end_btf;
+ }
+ }
+
+ /* We already verified dtor_func to be btf_type_is_func
+ * in register_btf_id_dtor_kfuncs.
+ */
+ dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off);
+ addr = kallsyms_lookup_name(dtor_func_name);
+ if (!addr) {
+ ret = -EINVAL;
+ goto end_mod;
+ }
+ tab->off[i].kptr.dtor = (void *)addr;
+ }
+
+ tab->off[i].offset = info_arr[i].off;
+ tab->off[i].type = info_arr[i].type;
+ tab->off[i].kptr.btf_id = id;
+ tab->off[i].kptr.btf = kernel_btf;
+ tab->off[i].kptr.module = mod;
+ }
+ tab->nr_off = nr_off;
+ return tab;
+end_mod:
+ module_put(mod);
+end_btf:
+ btf_put(kernel_btf);
+end:
+ while (i--) {
+ btf_put(tab->off[i].kptr.btf);
+ if (tab->off[i].kptr.module)
+ module_put(tab->off[i].kptr.module);
+ }
+ kfree(tab);
+ return ERR_PTR(ret);
}
static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
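
On the program side, btf_parse_kptrs() above keys off BTF type tags named "kptr" and "kptr_ref" attached to pointer members of the map value. A minimal BPF C sketch of such a map value (the struct and map names are hypothetical, and the tag macros follow the selftest convention; a real kptr must point to a type that exists in vmlinux or module BTF):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define __kptr		__attribute__((btf_type_tag("kptr")))
#define __kptr_ref	__attribute__((btf_type_tag("kptr_ref")))

struct example_obj {
	long data;
};

struct map_value {
	struct example_obj __kptr *unref_ptr;	/* unreferenced kptr */
	struct example_obj __kptr_ref *ref_ptr;	/* referenced kptr; its type
						 * needs a registered dtor kfunc
						 */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} kptr_map SEC(".maps");

char _license[] SEC("license") = "GPL";
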
@@ -4541,6 +4804,48 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
return 0;
}
+static int btf_check_type_tags(struct btf_verifier_env *env,
+ struct btf *btf, int start_id)
+{
+ int i, n, good_id = start_id - 1;
+ bool in_tags;
+
+ n = btf_nr_types(btf);
+ for (i = start_id; i < n; i++) {
+ const struct btf_type *t;
+ u32 cur_id = i;
+
+ t = btf_type_by_id(btf, i);
+ if (!t)
+ return -EINVAL;
+ if (!btf_type_is_modifier(t))
+ continue;
+
+ cond_resched();
+
+ in_tags = btf_type_is_type_tag(t);
+ while (btf_type_is_modifier(t)) {
+ if (btf_type_is_type_tag(t)) {
+ if (!in_tags) {
+ btf_verifier_log(env, "Type tags don't precede modifiers");
+ return -EINVAL;
+ }
+ } else if (in_tags) {
+ in_tags = false;
+ }
+ if (cur_id <= good_id)
+ break;
+ /* Move to next type */
+ cur_id = t->type;
+ t = btf_type_by_id(btf, cur_id);
+ if (!t)
+ return -EINVAL;
+ }
+ good_id = i;
+ }
+ return 0;
+}
+
static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
u32 log_level, char __user *log_ubuf, u32 log_size)
{
@@ -4608,6 +4913,10 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
if (err)
goto errout;
+ err = btf_check_type_tags(env, btf, 1);
+ if (err)
+ goto errout;
+
if (log->level && bpf_verifier_log_full(log)) {
err = -ENOSPC;
goto errout;
@@ -4716,41 +5025,6 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
return ctx_type;
}
-static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
-#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
-#define BPF_LINK_TYPE(_id, _name)
-#define BPF_MAP_TYPE(_id, _ops) \
- [_id] = &_ops,
-#include <linux/bpf_types.h>
-#undef BPF_PROG_TYPE
-#undef BPF_LINK_TYPE
-#undef BPF_MAP_TYPE
-};
-
-static int btf_vmlinux_map_ids_init(const struct btf *btf,
- struct bpf_verifier_log *log)
-{
- const struct bpf_map_ops *ops;
- int i, btf_id;
-
- for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
- ops = btf_vmlinux_map_ops[i];
- if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
- continue;
- if (!ops->map_btf_name || !ops->map_btf_id) {
- bpf_log(log, "map type %d is misconfigured\n", i);
- return -EINVAL;
- }
- btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
- BTF_KIND_STRUCT);
- if (btf_id < 0)
- return btf_id;
- *ops->map_btf_id = btf_id;
- }
-
- return 0;
-}
-
static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
struct btf *btf,
const struct btf_type *t,
@@ -4809,14 +5083,13 @@ struct btf *btf_parse_vmlinux(void)
if (err)
goto errout;
+ err = btf_check_type_tags(env, btf, 1);
+ if (err)
+ goto errout;
+
/* btf_parse_vmlinux() runs under bpf_verifier_lock */
bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
- /* find bpf map structs for map_ptr access checking */
- err = btf_vmlinux_map_ids_init(btf, log);
- if (err < 0)
- goto errout;
-
bpf_struct_ops_init(btf, log);
refcount_set(&btf->refcnt, 1);
@@ -4894,6 +5167,10 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u
if (err)
goto errout;
+ err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
+ if (err)
+ goto errout;
+
btf_verifier_env_free(env);
refcount_set(&btf->refcnt, 1);
return btf;
@@ -5429,7 +5706,8 @@ static bool btf_types_are_same(const struct btf *btf1, u32 id1,
bool btf_struct_ids_match(struct bpf_verifier_log *log,
const struct btf *btf, u32 id, int off,
- const struct btf *need_btf, u32 need_type_id)
+ const struct btf *need_btf, u32 need_type_id,
+ bool strict)
{
const struct btf_type *type;
enum bpf_type_flag flag;
@@ -5438,7 +5716,12 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
/* Are we already done? */
if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
return true;
-
+ /* In case of strict type match, we do not walk struct, the top level
+ * type match must succeed. When strict is true, off should have already
+ * been 0.
+ */
+ if (strict)
+ return false;
again:
type = btf_type_by_id(btf, id);
if (!type)
@@ -5772,11 +6055,11 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
struct bpf_verifier_log *log = &env->log;
u32 i, nargs, ref_id, ref_obj_id = 0;
bool is_kfunc = btf_is_kernel(btf);
+ bool rel = false, kptr_get = false;
const char *func_name, *ref_tname;
const struct btf_type *t, *ref_t;
const struct btf_param *args;
int ref_regno = 0, ret;
- bool rel = false;
t = btf_type_by_id(btf, func_id);
if (!t || !btf_type_is_func(t)) {
@@ -5802,14 +6085,19 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
return -EINVAL;
}
- /* Only kfunc can be release func */
- if (is_kfunc)
+ if (is_kfunc) {
+ /* Only kfunc can be release func */
rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog),
BTF_KFUNC_TYPE_RELEASE, func_id);
+ kptr_get = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog),
+ BTF_KFUNC_TYPE_KPTR_ACQUIRE, func_id);
+ }
+
/* check that BTF function arguments match actual types that the
* verifier sees.
*/
for (i = 0; i < nargs; i++) {
+ enum bpf_arg_type arg_type = ARG_DONTCARE;
u32 regno = i + 1;
struct bpf_reg_state *reg = &regs[regno];
@@ -5830,12 +6118,58 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
ref_tname = btf_name_by_offset(btf, ref_t->name_off);
- ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE, rel);
+ if (rel && reg->ref_obj_id)
+ arg_type |= OBJ_RELEASE;
+ ret = check_func_arg_reg_off(env, reg, regno, arg_type);
if (ret < 0)
return ret;
- if (btf_get_prog_ctx_type(log, btf, t,
- env->prog->type, i)) {
+ /* kptr_get is only true for kfunc */
+ if (i == 0 && kptr_get) {
+ struct bpf_map_value_off_desc *off_desc;
+
+ if (reg->type != PTR_TO_MAP_VALUE) {
+ bpf_log(log, "arg#0 expected pointer to map value\n");
+ return -EINVAL;
+ }
+
+ /* check_func_arg_reg_off allows var_off for
+ * PTR_TO_MAP_VALUE, but we need fixed offset to find
+ * off_desc.
+ */
+ if (!tnum_is_const(reg->var_off)) {
+ bpf_log(log, "arg#0 must have constant offset\n");
+ return -EINVAL;
+ }
+
+ off_desc = bpf_map_kptr_off_contains(reg->map_ptr, reg->off + reg->var_off.value);
+ if (!off_desc || off_desc->type != BPF_KPTR_REF) {
+ bpf_log(log, "arg#0 no referenced kptr at map value offset=%llu\n",
+ reg->off + reg->var_off.value);
+ return -EINVAL;
+ }
+
+ if (!btf_type_is_ptr(ref_t)) {
+ bpf_log(log, "arg#0 BTF type must be a double pointer\n");
+ return -EINVAL;
+ }
+
+ ref_t = btf_type_skip_modifiers(btf, ref_t->type, &ref_id);
+ ref_tname = btf_name_by_offset(btf, ref_t->name_off);
+
+ if (!btf_type_is_struct(ref_t)) {
+ bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
+ func_name, i, btf_type_str(ref_t), ref_tname);
+ return -EINVAL;
+ }
+ if (!btf_struct_ids_match(log, btf, ref_id, 0, off_desc->kptr.btf,
+ off_desc->kptr.btf_id, true)) {
+ bpf_log(log, "kernel function %s args#%d expected pointer to %s %s\n",
+ func_name, i, btf_type_str(ref_t), ref_tname);
+ return -EINVAL;
+ }
+ /* rest of the arguments can be anything, like normal kfunc */
+ } else if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) {
/* If function expects ctx type in BTF check that caller
* is passing PTR_TO_CTX.
*/
@@ -5862,11 +6196,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
if (reg->type == PTR_TO_BTF_ID) {
reg_btf = reg->btf;
reg_ref_id = reg->btf_id;
- /* Ensure only one argument is referenced
- * PTR_TO_BTF_ID, check_func_arg_reg_off relies
- * on only one referenced register being allowed
- * for kfuncs.
- */
+ /* Ensure only one argument is referenced PTR_TO_BTF_ID */
if (reg->ref_obj_id) {
if (ref_obj_id) {
bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
@@ -5886,7 +6216,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
reg_ref_tname = btf_name_by_offset(reg_btf,
reg_ref_t->name_off);
if (!btf_struct_ids_match(log, reg_btf, reg_ref_id,
- reg->off, btf, ref_id)) {
+ reg->off, btf, ref_id, rel && reg->ref_obj_id)) {
bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
func_name, i,
btf_type_str(ref_t), ref_tname,
@@ -6832,6 +7162,138 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
}
EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+{
+ struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
+ struct btf_id_dtor_kfunc *dtor;
+
+ if (!tab)
+ return -ENOENT;
+ /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
+ * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
+ */
+ BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
+ dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
+ if (!dtor)
+ return -ENOENT;
+ return dtor->kfunc_btf_id;
+}
+
+static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
+{
+ const struct btf_type *dtor_func, *dtor_func_proto, *t;
+ const struct btf_param *args;
+ s32 dtor_btf_id;
+ u32 nr_args, i;
+
+ for (i = 0; i < cnt; i++) {
+ dtor_btf_id = dtors[i].kfunc_btf_id;
+
+ dtor_func = btf_type_by_id(btf, dtor_btf_id);
+ if (!dtor_func || !btf_type_is_func(dtor_func))
+ return -EINVAL;
+
+ dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
+ if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
+ return -EINVAL;
+
+ /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
+ t = btf_type_by_id(btf, dtor_func_proto->type);
+ if (!t || !btf_type_is_void(t))
+ return -EINVAL;
+
+ nr_args = btf_type_vlen(dtor_func_proto);
+ if (nr_args != 1)
+ return -EINVAL;
+ args = btf_params(dtor_func_proto);
+ t = btf_type_by_id(btf, args[0].type);
+ /* Allow any pointer type, as width on targets Linux supports
+ * will be the same for all pointer types (i.e. sizeof(void *))
+ */
+ if (!t || !btf_type_is_ptr(t))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* This function must be invoked only from initcalls/module init functions */
+int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
+ struct module *owner)
+{
+ struct btf_id_dtor_kfunc_tab *tab;
+ struct btf *btf;
+ u32 tab_cnt;
+ int ret;
+
+ btf = btf_get_module_btf(owner);
+ if (!btf) {
+ if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
+ pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
+ return -ENOENT;
+ }
+ if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
+ pr_err("missing module BTF, cannot register dtor kfuncs\n");
+ return -ENOENT;
+ }
+ return 0;
+ }
+ if (IS_ERR(btf))
+ return PTR_ERR(btf);
+
+ if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
+ pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
+ ret = -E2BIG;
+ goto end;
+ }
+
+ /* Ensure that the prototype of dtor kfuncs being registered is sane */
+ ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
+ if (ret < 0)
+ goto end;
+
+ tab = btf->dtor_kfunc_tab;
+ /* Only one call allowed for modules */
+ if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ tab_cnt = tab ? tab->cnt : 0;
+ if (tab_cnt > U32_MAX - add_cnt) {
+ ret = -EOVERFLOW;
+ goto end;
+ }
+ if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
+ pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
+ ret = -E2BIG;
+ goto end;
+ }
+
+ tab = krealloc(btf->dtor_kfunc_tab,
+ offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tab) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ if (!btf->dtor_kfunc_tab)
+ tab->cnt = 0;
+ btf->dtor_kfunc_tab = tab;
+
+ memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
+ tab->cnt += add_cnt;
+
+ sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
+
+ return 0;
+end:
+ btf_free_dtor_kfunc_tab(btf);
+ btf_put(btf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
+
#define MAX_TYPES_ARE_COMPAT_DEPTH 2
static
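
For referenced (kptr_ref) objects, the owning subsystem has to whitelist the type by registering a destructor kfunc. A hypothetical module-init sketch following the register_btf_id_dtor_kfuncs() API added above (example_obj and example_obj_release are made-up names):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>

/* Resolved at build time by resolve_btfids: [0] = the struct, [1] = its dtor. */
BTF_ID_LIST(example_dtor_ids)
BTF_ID(struct, example_obj)
BTF_ID(func, example_obj_release)

static int __init example_kptr_init(void)
{
	const struct btf_id_dtor_kfunc example_dtors[] = {
		{
			.btf_id       = example_dtor_ids[0],
			.kfunc_btf_id = example_dtor_ids[1],
		},
	};

	return register_btf_id_dtor_kfuncs(example_dtors,
					   ARRAY_SIZE(example_dtors),
					   THIS_MODULE);
}
module_init(example_kptr_init);
MODULE_LICENSE("GPL");
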
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 128028efda64..afb414b26d01 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -22,6 +22,45 @@
DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
+/* __always_inline is necessary to prevent indirect call through run_prog
+ * function pointer.
+ */
+static __always_inline int
+bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
+ enum cgroup_bpf_attach_type atype,
+ const void *ctx, bpf_prog_run_fn run_prog,
+ int retval, u32 *ret_flags)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ const struct bpf_prog_array *array;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_cg_run_ctx run_ctx;
+ u32 func_ret;
+
+ run_ctx.retval = retval;
+ migrate_disable();
+ rcu_read_lock();
+ array = rcu_dereference(cgrp->effective[atype]);
+ item = &array->items[0];
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.prog_item = item;
+ func_ret = run_prog(prog, ctx);
+ if (ret_flags) {
+ *(ret_flags) |= (func_ret >> 1);
+ func_ret &= 1;
+ }
+ if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
+ run_ctx.retval = -EPERM;
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ rcu_read_unlock();
+ migrate_enable();
+ return run_ctx.retval;
+}
+
void cgroup_bpf_offline(struct cgroup *cgrp)
{
cgroup_get(cgrp);
@@ -1075,11 +1114,38 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
bpf_compute_and_save_data_end(skb, &saved_data_end);
if (atype == CGROUP_INET_EGRESS) {
- ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
- cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
+ u32 flags = 0;
+ bool cn;
+
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
+ __bpf_prog_run_save_cb, 0, &flags);
+
+ /* Return values of CGROUP EGRESS BPF programs are:
+ * 0: drop packet
+ * 1: keep packet
+ * 2: drop packet and cn
+ * 3: keep packet and cn
+ *
+ * The returned value is then converted to one of the NET_XMIT
+ * or an error code that is then interpreted as drop packet
+ * (and no cn):
+ * 0: NET_XMIT_SUCCESS skb should be transmitted
+ * 1: NET_XMIT_DROP skb should be dropped and cn
+ * 2: NET_XMIT_CN skb should be transmitted and cn
+ * 3: -err skb should be dropped
+ */
+
+ cn = flags & BPF_RET_SET_CN;
+ if (ret && !IS_ERR_VALUE((long)ret))
+ ret = -EFAULT;
+ if (!ret)
+ ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
+ else
+ ret = (cn ? NET_XMIT_DROP : ret);
} else {
- ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
- __bpf_prog_run_save_cb, 0);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
+ skb, __bpf_prog_run_save_cb, 0,
+ NULL);
if (ret && !IS_ERR_VALUE((long)ret))
ret = -EFAULT;
}
@@ -1109,8 +1175,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk,
- bpf_prog_run, 0);
+ return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
+ NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
@@ -1155,8 +1221,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
}
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
- bpf_prog_run, 0, flags);
+ return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
+ 0, flags);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
@@ -1182,8 +1248,8 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
- bpf_prog_run, 0);
+ return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
+ 0, NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
@@ -1200,8 +1266,8 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
rcu_read_lock();
cgrp = task_dfl_cgroup(current);
- ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
- bpf_prog_run, 0);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
+ NULL);
rcu_read_unlock();
return ret;
@@ -1366,8 +1432,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
rcu_read_lock();
cgrp = task_dfl_cgroup(current);
- ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
- bpf_prog_run, 0);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
+ NULL);
rcu_read_unlock();
kfree(ctx.cur_val);
@@ -1459,8 +1525,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
}
lock_sock(sk);
- ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
- &ctx, bpf_prog_run, 0);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
+ &ctx, bpf_prog_run, 0, NULL);
release_sock(sk);
if (ret)
@@ -1559,8 +1625,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
}
lock_sock(sk);
- ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
- &ctx, bpf_prog_run, retval);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
+ &ctx, bpf_prog_run, retval, NULL);
release_sock(sk);
if (ret < 0)
@@ -1608,8 +1674,8 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
* be called if that data shouldn't be "exported".
*/
- ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
- &ctx, bpf_prog_run, retval);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
+ &ctx, bpf_prog_run, retval, NULL);
if (ret < 0)
return ret;
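
The comment block in the egress path above documents the two-bit return convention that bpf_prog_run_array_cg() now folds into ret_flags: bit 0 keeps or drops the packet, bit 1 requests congestion notification (BPF_RET_SET_CN on the kernel side). A minimal BPF C sketch of a program using it (purely illustrative; it unconditionally keeps the packet and signals CN):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int egress_signal_cn(struct __sk_buff *skb)
{
	/* 0: drop, 1: keep, 2: drop + CN, 3: keep + CN */
	return 3;
}

char _license[] SEC("license") = "GPL";
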
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 650e5d21f90d..f4860ac756cd 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>
+#include <linux/btf_ids.h>
#include <linux/netdevice.h> /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */
@@ -673,7 +674,7 @@ static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
__cpu_map_lookup_elem);
}
-static int cpu_map_btf_id;
+BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = cpu_map_alloc,
@@ -683,8 +684,7 @@ const struct bpf_map_ops cpu_map_ops = {
.map_lookup_elem = cpu_map_lookup_elem,
.map_get_next_key = cpu_map_get_next_key,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_cpu_map",
- .map_btf_id = &cpu_map_btf_id,
+ .map_btf_id = &cpu_map_btf_ids[0],
.map_redirect = cpu_map_redirect,
};
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 038f6d7a83e4..c2867068e5bd 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -48,6 +48,7 @@
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
+#include <linux/btf_ids.h>
#define DEV_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@@ -1005,7 +1006,7 @@ static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
__dev_map_hash_lookup_elem);
}
-static int dev_map_btf_id;
+BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = dev_map_alloc,
@@ -1015,12 +1016,10 @@ const struct bpf_map_ops dev_map_ops = {
.map_update_elem = dev_map_update_elem,
.map_delete_elem = dev_map_delete_elem,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_dtab",
- .map_btf_id = &dev_map_btf_id,
+ .map_btf_id = &dev_map_btf_ids[0],
.map_redirect = dev_map_redirect,
};
-static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = dev_map_alloc,
@@ -1030,8 +1029,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
.map_update_elem = dev_map_hash_update_elem,
.map_delete_elem = dev_map_hash_delete_elem,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_dtab",
- .map_btf_id = &dev_map_hash_map_btf_id,
+ .map_btf_id = &dev_map_btf_ids[0],
.map_redirect = dev_hash_map_redirect,
};
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 65877967f414..3e00e62b2218 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -10,6 +10,7 @@
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
+#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
@@ -238,7 +239,7 @@ static void htab_free_prealloced_timers(struct bpf_htab *htab)
u32 num_entries = htab->map.max_entries;
int i;
- if (likely(!map_value_has_timer(&htab->map)))
+ if (!map_value_has_timer(&htab->map))
return;
if (htab_has_extra_elems(htab))
num_entries += num_possible_cpus();
@@ -254,6 +255,25 @@ static void htab_free_prealloced_timers(struct bpf_htab *htab)
}
}
+static void htab_free_prealloced_kptrs(struct bpf_htab *htab)
+{
+ u32 num_entries = htab->map.max_entries;
+ int i;
+
+ if (!map_value_has_kptrs(&htab->map))
+ return;
+ if (htab_has_extra_elems(htab))
+ num_entries += num_possible_cpus();
+
+ for (i = 0; i < num_entries; i++) {
+ struct htab_elem *elem;
+
+ elem = get_htab_elem(htab, i);
+ bpf_map_free_kptrs(&htab->map, elem->key + round_up(htab->map.key_size, 8));
+ cond_resched();
+ }
+}
+
static void htab_free_elems(struct bpf_htab *htab)
{
int i;
@@ -725,12 +745,15 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map,
return insn - insn_buf;
}
-static void check_and_free_timer(struct bpf_htab *htab, struct htab_elem *elem)
+static void check_and_free_fields(struct bpf_htab *htab,
+ struct htab_elem *elem)
{
- if (unlikely(map_value_has_timer(&htab->map)))
- bpf_timer_cancel_and_free(elem->key +
- round_up(htab->map.key_size, 8) +
- htab->map.timer_off);
+ void *map_value = elem->key + round_up(htab->map.key_size, 8);
+
+ if (map_value_has_timer(&htab->map))
+ bpf_timer_cancel_and_free(map_value + htab->map.timer_off);
+ if (map_value_has_kptrs(&htab->map))
+ bpf_map_free_kptrs(&htab->map, map_value);
}
/* It is called from the bpf_lru_list when the LRU needs to delete
@@ -738,7 +761,7 @@ static void check_and_free_timer(struct bpf_htab *htab, struct htab_elem *elem)
*/
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
- struct bpf_htab *htab = (struct bpf_htab *)arg;
+ struct bpf_htab *htab = arg;
struct htab_elem *l = NULL, *tgt_l;
struct hlist_nulls_head *head;
struct hlist_nulls_node *n;
@@ -757,7 +780,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
hlist_nulls_del_rcu(&l->hash_node);
- check_and_free_timer(htab, l);
+ check_and_free_fields(htab, l);
break;
}
@@ -829,7 +852,7 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
- check_and_free_timer(htab, l);
+ check_and_free_fields(htab, l);
kfree(l);
}
@@ -857,7 +880,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
htab_put_fd_value(htab, l);
if (htab_is_prealloc(htab)) {
- check_and_free_timer(htab, l);
+ check_and_free_fields(htab, l);
__pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
atomic_dec(&htab->count);
@@ -1104,7 +1127,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
if (!htab_is_prealloc(htab))
free_htab_elem(htab, l_old);
else
- check_and_free_timer(htab, l_old);
+ check_and_free_fields(htab, l_old);
}
ret = 0;
err:
@@ -1114,7 +1137,7 @@ err:
static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
{
- check_and_free_timer(htab, elem);
+ check_and_free_fields(htab, elem);
bpf_lru_push_free(&htab->lru, &elem->lru_node);
}
@@ -1419,8 +1442,14 @@ static void htab_free_malloced_timers(struct bpf_htab *htab)
struct hlist_nulls_node *n;
struct htab_elem *l;
- hlist_nulls_for_each_entry(l, n, head, hash_node)
- check_and_free_timer(htab, l);
+ hlist_nulls_for_each_entry(l, n, head, hash_node) {
+ /* We don't reset or free kptr on uref dropping to zero,
+ * hence just free timer.
+ */
+ bpf_timer_cancel_and_free(l->key +
+ round_up(htab->map.key_size, 8) +
+ htab->map.timer_off);
+ }
cond_resched_rcu();
}
rcu_read_unlock();
@@ -1430,7 +1459,8 @@ static void htab_map_free_timers(struct bpf_map *map)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- if (likely(!map_value_has_timer(&htab->map)))
+ /* We don't reset or free kptr on uref dropping to zero. */
+ if (!map_value_has_timer(&htab->map))
return;
if (!htab_is_prealloc(htab))
htab_free_malloced_timers(htab);
@@ -1453,11 +1483,14 @@ static void htab_map_free(struct bpf_map *map)
* not have executed. Wait for them.
*/
rcu_barrier();
- if (!htab_is_prealloc(htab))
+ if (!htab_is_prealloc(htab)) {
delete_all_elements(htab);
- else
+ } else {
+ htab_free_prealloced_kptrs(htab);
prealloc_destroy(htab);
+ }
+ bpf_map_free_kptr_off_tab(map);
free_percpu(htab->extra_elems);
bpf_map_area_free(htab->buckets);
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
@@ -2105,7 +2138,7 @@ out:
return num_elems;
}
-static int htab_map_btf_id;
+BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
@@ -2122,12 +2155,10 @@ const struct bpf_map_ops htab_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab),
- .map_btf_name = "bpf_htab",
- .map_btf_id = &htab_map_btf_id,
+ .map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
};
-static int htab_lru_map_btf_id;
const struct bpf_map_ops htab_lru_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
@@ -2145,8 +2176,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab_lru),
- .map_btf_name = "bpf_htab",
- .map_btf_id = &htab_lru_map_btf_id,
+ .map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
};
@@ -2252,7 +2282,6 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
-static int htab_percpu_map_btf_id;
const struct bpf_map_ops htab_percpu_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
@@ -2267,12 +2296,10 @@ const struct bpf_map_ops htab_percpu_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab_percpu),
- .map_btf_name = "bpf_htab",
- .map_btf_id = &htab_percpu_map_btf_id,
+ .map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
};
-static int htab_lru_percpu_map_btf_id;
const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
@@ -2287,8 +2314,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_set_for_each_callback_args = map_set_for_each_callback_args,
.map_for_each_callback = bpf_for_each_hash_elem,
BATCH_OPS(htab_lru_percpu),
- .map_btf_name = "bpf_htab",
- .map_btf_id = &htab_lru_percpu_map_btf_id,
+ .map_btf_id = &htab_map_btf_ids[0],
.iter_seq_info = &iter_seq_info,
};
@@ -2412,7 +2438,6 @@ static void htab_of_map_free(struct bpf_map *map)
fd_htab_map_free(map);
}
-static int htab_of_maps_map_btf_id;
const struct bpf_map_ops htab_of_maps_map_ops = {
.map_alloc_check = fd_htab_map_alloc_check,
.map_alloc = htab_of_map_alloc,
@@ -2425,6 +2450,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = htab_of_map_gen_lookup,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_htab",
- .map_btf_id = &htab_of_maps_map_btf_id,
+ .map_btf_id = &htab_map_btf_ids[0],
};
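The check_and_free_timer() to check_and_free_fields() change in the hashtab diff above relies on one layout fact: a hash map element stores its value immediately after the key, rounded up to 8 bytes, and map->timer_off plus the kptr_off_tab offsets are all relative to that value. A simplified, kernel-style sketch of the arithmetic (struct htab_elem_sketch is an illustration, not the real struct htab_elem):

/* Simplified view of the element layout assumed by the hunks above. */
struct htab_elem_sketch {
	/* ... bookkeeping: hash node, hash, freelist node ... */
	char key[] __attribute__((aligned(8)));	/* key, padding, then value */
};

static void *elem_value(struct bpf_map *map, struct htab_elem_sketch *l)
{
	/* the value starts after the key, padded to an 8-byte boundary */
	return l->key + round_up(map->key_size, 8);
}

/* check_and_free_fields() then applies the per-map offsets to that base:
 *   bpf_timer_cancel_and_free(value + map->timer_off);
 *   bpf_map_free_kptrs(map, value);   // walks map->kptr_off_tab offsets
 */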
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 315053ef6a75..3e709fed5306 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1374,6 +1374,28 @@ out:
kfree(t);
}
+BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
+{
+ unsigned long *kptr = map_value;
+
+ return xchg(kptr, (unsigned long)ptr);
+}
+
+/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
+ * helper is determined dynamically by the verifier.
+ */
+#define BPF_PTR_POISON ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
+
+const struct bpf_func_proto bpf_kptr_xchg_proto = {
+ .func = bpf_kptr_xchg,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
+ .ret_btf_id = BPF_PTR_POISON,
+ .arg1_type = ARG_PTR_TO_KPTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
+ .arg2_btf_id = BPF_PTR_POISON,
+};
+
const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
@@ -1452,6 +1474,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_timer_start_proto;
case BPF_FUNC_timer_cancel:
return &bpf_timer_cancel_proto;
+ case BPF_FUNC_kptr_xchg:
+ return &bpf_kptr_xchg_proto;
default:
break;
}
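bpf_kptr_xchg() is the only way a program may move a referenced kptr in or out of a map value. A minimal sketch of its use from a BPF program; the __kptr_ref tag spelling, the task_release() kfunc and the availability of a bpf_kptr_xchg() declaration in the helper headers are all assumptions here (at this point only the selftest types have release destructors registered, so a real program would use one of those types):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))	/* assumed */

extern void task_release(struct task_struct *p) __ksym;	/* hypothetical */

struct map_value {
	struct task_struct __kptr_ref *ref_task;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} tasks SEC(".maps");

SEC("tc")
int swap_kptr(struct __sk_buff *ctx)
{
	struct task_struct *old;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&tasks, &key);
	if (!v)
		return 0;

	/* A referenced kptr field may only be touched through bpf_kptr_xchg().
	 * Swapping in NULL clears the slot and transfers ownership of the old
	 * pointer (if any) to the program, which must then release it.
	 */
	old = bpf_kptr_xchg(&v->ref_task, NULL);
	if (old)
		task_release(old);
	return 0;
}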
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 497916060ac7..8654fc97f5fe 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -9,6 +9,7 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>
+#include <linux/btf_ids.h>
#ifdef CONFIG_CGROUP_BPF
@@ -446,7 +447,8 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
-static int cgroup_storage_map_btf_id;
+BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct,
+ bpf_cgroup_storage_map)
const struct bpf_map_ops cgroup_storage_map_ops = {
.map_alloc = cgroup_storage_map_alloc,
.map_free = cgroup_storage_map_free,
@@ -456,8 +458,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
.map_delete_elem = cgroup_storage_delete_elem,
.map_check_btf = cgroup_storage_check_btf,
.map_seq_show_elem = cgroup_storage_seq_show_elem,
- .map_btf_name = "bpf_cgroup_storage_map",
- .map_btf_id = &cgroup_storage_map_btf_id,
+ .map_btf_id = &cgroup_storage_map_btf_ids[0],
};
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 5763cc7ac4f1..f0d05a3cc4b9 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -14,6 +14,7 @@
#include <linux/vmalloc.h>
#include <net/ipv6.h>
#include <uapi/linux/btf.h>
+#include <linux/btf_ids.h>
/* Intermediate node */
#define LPM_TREE_NODE_FLAG_IM BIT(0)
@@ -719,7 +720,7 @@ static int trie_check_btf(const struct bpf_map *map,
-EINVAL : 0;
}
-static int trie_map_btf_id;
+BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
const struct bpf_map_ops trie_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = trie_alloc,
@@ -732,6 +733,5 @@ const struct bpf_map_ops trie_map_ops = {
.map_update_batch = generic_map_update_batch,
.map_delete_batch = generic_map_delete_batch,
.map_check_btf = trie_check_btf,
- .map_btf_name = "lpm_trie",
- .map_btf_id = &trie_map_btf_id,
+ .map_btf_id = &trie_map_btf_ids[0],
};
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 5cd8f5277279..135205d0d560 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -52,6 +52,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta->max_entries = inner_map->max_entries;
inner_map_meta->spin_lock_off = inner_map->spin_lock_off;
inner_map_meta->timer_off = inner_map->timer_off;
+ inner_map_meta->kptr_off_tab = bpf_map_copy_kptr_off_tab(inner_map);
if (inner_map->btf) {
btf_get(inner_map->btf);
inner_map_meta->btf = inner_map->btf;
@@ -71,6 +72,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
void bpf_map_meta_free(struct bpf_map *map_meta)
{
+ bpf_map_free_kptr_off_tab(map_meta);
btf_put(map_meta->btf);
kfree(map_meta);
}
@@ -83,7 +85,8 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,
meta0->key_size == meta1->key_size &&
meta0->value_size == meta1->value_size &&
meta0->timer_off == meta1->timer_off &&
- meta0->map_flags == meta1->map_flags;
+ meta0->map_flags == meta1->map_flags &&
+ bpf_map_equal_kptr_off_tab(meta0, meta1);
}
void *bpf_map_fd_get_ptr(struct bpf_map *map,
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index f9c734aaa990..a1c0794ae49d 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
+#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#define QUEUE_STACK_CREATE_FLAG_MASK \
@@ -247,7 +248,7 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
return -EINVAL;
}
-static int queue_map_btf_id;
+BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = queue_stack_map_alloc_check,
@@ -260,11 +261,9 @@ const struct bpf_map_ops queue_map_ops = {
.map_pop_elem = queue_map_pop_elem,
.map_peek_elem = queue_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key,
- .map_btf_name = "bpf_queue_stack",
- .map_btf_id = &queue_map_btf_id,
+ .map_btf_id = &queue_map_btf_ids[0],
};
-static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = queue_stack_map_alloc_check,
@@ -277,6 +276,5 @@ const struct bpf_map_ops stack_map_ops = {
.map_pop_elem = stack_map_pop_elem,
.map_peek_elem = stack_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key,
- .map_btf_name = "bpf_queue_stack",
- .map_btf_id = &stack_map_btf_id,
+ .map_btf_id = &queue_map_btf_ids[0],
};
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 8251243022a2..e2618fb5870e 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -6,6 +6,7 @@
#include <linux/err.h>
#include <linux/sock_diag.h>
#include <net/sock_reuseport.h>
+#include <linux/btf_ids.h>
struct reuseport_array {
struct bpf_map map;
@@ -337,7 +338,7 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
return 0;
}
-static int reuseport_array_map_btf_id;
+BTF_ID_LIST_SINGLE(reuseport_array_map_btf_ids, struct, reuseport_array)
const struct bpf_map_ops reuseport_array_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = reuseport_array_alloc_check,
@@ -346,6 +347,5 @@ const struct bpf_map_ops reuseport_array_ops = {
.map_lookup_elem = reuseport_array_lookup_elem,
.map_get_next_key = reuseport_array_get_next_key,
.map_delete_elem = reuseport_array_delete_elem,
- .map_btf_name = "reuseport_array",
- .map_btf_id = &reuseport_array_map_btf_id,
+ .map_btf_id = &reuseport_array_map_btf_ids[0],
};
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 710ba9de12ce..311264ab80c4 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -10,6 +10,7 @@
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
+#include <linux/btf_ids.h>
#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
@@ -263,7 +264,7 @@ static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
return 0;
}
-static int ringbuf_map_btf_id;
+BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = ringbuf_map_alloc,
@@ -274,8 +275,7 @@ const struct bpf_map_ops ringbuf_map_ops = {
.map_update_elem = ringbuf_map_update_elem,
.map_delete_elem = ringbuf_map_delete_elem,
.map_get_next_key = ringbuf_map_get_next_key,
- .map_btf_name = "bpf_ringbuf_map",
- .map_btf_id = &ringbuf_map_btf_id,
+ .map_btf_id = &ringbuf_map_btf_ids[0],
};
/* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,
@@ -404,7 +404,7 @@ BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
const struct bpf_func_proto bpf_ringbuf_submit_proto = {
.func = bpf_ringbuf_submit,
.ret_type = RET_VOID,
- .arg1_type = ARG_PTR_TO_ALLOC_MEM,
+ .arg1_type = ARG_PTR_TO_ALLOC_MEM | OBJ_RELEASE,
.arg2_type = ARG_ANYTHING,
};
@@ -417,7 +417,7 @@ BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
const struct bpf_func_proto bpf_ringbuf_discard_proto = {
.func = bpf_ringbuf_discard,
.ret_type = RET_VOID,
- .arg1_type = ARG_PTR_TO_ALLOC_MEM,
+ .arg1_type = ARG_PTR_TO_ALLOC_MEM | OBJ_RELEASE,
.arg2_type = ARG_ANYTHING,
};
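Tagging the sample argument of bpf_ringbuf_submit/discard with OBJ_RELEASE lets the generic release-argument tracking added later in this series (meta.release_regno in verifier.c below) replace the old is_release_function() special case; the program-facing API is unchanged. A minimal sketch of the usual reserve/commit pairing (map name, event layout and section are assumptions):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

struct event {
	__u32 pid;
};

SEC("tp_btf/sched_process_exec")
int BPF_PROG(emit_event)
{
	struct event *e;

	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);	/* acquires a reference */
	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;
	if (!e->pid)
		bpf_ringbuf_discard(e, 0);	/* releases it without publishing */
	else
		bpf_ringbuf_submit(e, 0);	/* releases it and publishes */
	return 0;
}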
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 34725bfa1e97..1adbe67cdb95 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -100,13 +100,11 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
return ERR_PTR(-E2BIG);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
- cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
if (!smap)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&smap->map, attr);
- smap->map.value_size = value_size;
smap->n_buckets = n_buckets;
err = get_callchain_buffers(sysctl_perf_event_max_stack);
@@ -656,7 +654,7 @@ static void stack_map_free(struct bpf_map *map)
put_callchain_buffers();
}
-static int stack_trace_map_btf_id;
+BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
const struct bpf_map_ops stack_trace_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = stack_map_alloc,
@@ -666,6 +664,5 @@ const struct bpf_map_ops stack_trace_map_ops = {
.map_update_elem = stack_map_update_elem,
.map_delete_elem = stack_map_delete_elem,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_stack_map",
- .map_btf_id = &stack_trace_map_btf_id,
+ .map_btf_id = &stack_trace_map_btf_ids[0],
};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cdaa1152436a..e0aead17dff4 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -6,6 +6,7 @@
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
+#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
@@ -29,6 +30,7 @@
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
+#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
@@ -473,14 +475,128 @@ static void bpf_map_release_memcg(struct bpf_map *map)
}
#endif
+static int bpf_map_kptr_off_cmp(const void *a, const void *b)
+{
+ const struct bpf_map_value_off_desc *off_desc1 = a, *off_desc2 = b;
+
+ if (off_desc1->offset < off_desc2->offset)
+ return -1;
+ else if (off_desc1->offset > off_desc2->offset)
+ return 1;
+ return 0;
+}
+
+struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset)
+{
+ /* Since members are iterated in btf_find_field in increasing order,
+ * offsets appended to kptr_off_tab are in increasing order, so we can
+ * do bsearch to find exact match.
+ * do a bsearch to find an exact match.
+ */
+ struct bpf_map_value_off *tab;
+
+ if (!map_value_has_kptrs(map))
+ return NULL;
+ tab = map->kptr_off_tab;
+ return bsearch(&offset, tab->off, tab->nr_off, sizeof(tab->off[0]), bpf_map_kptr_off_cmp);
+}
+
+void bpf_map_free_kptr_off_tab(struct bpf_map *map)
+{
+ struct bpf_map_value_off *tab = map->kptr_off_tab;
+ int i;
+
+ if (!map_value_has_kptrs(map))
+ return;
+ for (i = 0; i < tab->nr_off; i++) {
+ if (tab->off[i].kptr.module)
+ module_put(tab->off[i].kptr.module);
+ btf_put(tab->off[i].kptr.btf);
+ }
+ kfree(tab);
+ map->kptr_off_tab = NULL;
+}
+
+struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map)
+{
+ struct bpf_map_value_off *tab = map->kptr_off_tab, *new_tab;
+ int size, i;
+
+ if (!map_value_has_kptrs(map))
+ return ERR_PTR(-ENOENT);
+ size = offsetof(struct bpf_map_value_off, off[tab->nr_off]);
+ new_tab = kmemdup(tab, size, GFP_KERNEL | __GFP_NOWARN);
+ if (!new_tab)
+ return ERR_PTR(-ENOMEM);
+ /* Do a deep copy of the kptr_off_tab */
+ for (i = 0; i < tab->nr_off; i++) {
+ btf_get(tab->off[i].kptr.btf);
+ if (tab->off[i].kptr.module && !try_module_get(tab->off[i].kptr.module)) {
+ while (i--) {
+ if (tab->off[i].kptr.module)
+ module_put(tab->off[i].kptr.module);
+ btf_put(tab->off[i].kptr.btf);
+ }
+ kfree(new_tab);
+ return ERR_PTR(-ENXIO);
+ }
+ }
+ return new_tab;
+}
+
+bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b)
+{
+ struct bpf_map_value_off *tab_a = map_a->kptr_off_tab, *tab_b = map_b->kptr_off_tab;
+ bool a_has_kptr = map_value_has_kptrs(map_a), b_has_kptr = map_value_has_kptrs(map_b);
+ int size;
+
+ if (!a_has_kptr && !b_has_kptr)
+ return true;
+ if (a_has_kptr != b_has_kptr)
+ return false;
+ if (tab_a->nr_off != tab_b->nr_off)
+ return false;
+ size = offsetof(struct bpf_map_value_off, off[tab_a->nr_off]);
+ return !memcmp(tab_a, tab_b, size);
+}
+
+/* Caller must ensure map_value_has_kptrs is true. Note that this function can
+ * be called on a map value while the map_value is visible to BPF programs, as
+ * it ensures the correct synchronization, and we already enforce the same using
+ * the bpf_kptr_xchg helper on the BPF program side for referenced kptrs.
+ */
+void bpf_map_free_kptrs(struct bpf_map *map, void *map_value)
+{
+ struct bpf_map_value_off *tab = map->kptr_off_tab;
+ unsigned long *btf_id_ptr;
+ int i;
+
+ for (i = 0; i < tab->nr_off; i++) {
+ struct bpf_map_value_off_desc *off_desc = &tab->off[i];
+ unsigned long old_ptr;
+
+ btf_id_ptr = map_value + off_desc->offset;
+ if (off_desc->type == BPF_KPTR_UNREF) {
+ u64 *p = (u64 *)btf_id_ptr;
+
+ WRITE_ONCE(p, 0);
+ continue;
+ }
+ old_ptr = xchg(btf_id_ptr, 0);
+ off_desc->kptr.dtor((void *)old_ptr);
+ }
+}
+
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_map, work);
security_bpf_map_free(map);
+ kfree(map->off_arr);
bpf_map_release_memcg(map);
- /* implementation dependent freeing */
+ /* implementation dependent freeing, map_free callback also does
+ * bpf_map_free_kptr_off_tab, if needed.
+ */
map->ops->map_free(map);
}
@@ -640,7 +756,7 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
int err;
if (!map->ops->map_mmap || map_value_has_spin_lock(map) ||
- map_value_has_timer(map))
+ map_value_has_timer(map) || map_value_has_kptrs(map))
return -ENOTSUPP;
if (!(vma->vm_flags & VM_SHARED))
@@ -767,6 +883,84 @@ int map_check_no_btf(const struct bpf_map *map,
return -ENOTSUPP;
}
+static int map_off_arr_cmp(const void *_a, const void *_b, const void *priv)
+{
+ const u32 a = *(const u32 *)_a;
+ const u32 b = *(const u32 *)_b;
+
+ if (a < b)
+ return -1;
+ else if (a > b)
+ return 1;
+ return 0;
+}
+
+static void map_off_arr_swap(void *_a, void *_b, int size, const void *priv)
+{
+ struct bpf_map *map = (struct bpf_map *)priv;
+ u32 *off_base = map->off_arr->field_off;
+ u32 *a = _a, *b = _b;
+ u8 *sz_a, *sz_b;
+
+ sz_a = map->off_arr->field_sz + (a - off_base);
+ sz_b = map->off_arr->field_sz + (b - off_base);
+
+ swap(*a, *b);
+ swap(*sz_a, *sz_b);
+}
+
+static int bpf_map_alloc_off_arr(struct bpf_map *map)
+{
+ bool has_spin_lock = map_value_has_spin_lock(map);
+ bool has_timer = map_value_has_timer(map);
+ bool has_kptrs = map_value_has_kptrs(map);
+ struct bpf_map_off_arr *off_arr;
+ u32 i;
+
+ if (!has_spin_lock && !has_timer && !has_kptrs) {
+ map->off_arr = NULL;
+ return 0;
+ }
+
+ off_arr = kmalloc(sizeof(*map->off_arr), GFP_KERNEL | __GFP_NOWARN);
+ if (!off_arr)
+ return -ENOMEM;
+ map->off_arr = off_arr;
+
+ off_arr->cnt = 0;
+ if (has_spin_lock) {
+ i = off_arr->cnt;
+
+ off_arr->field_off[i] = map->spin_lock_off;
+ off_arr->field_sz[i] = sizeof(struct bpf_spin_lock);
+ off_arr->cnt++;
+ }
+ if (has_timer) {
+ i = off_arr->cnt;
+
+ off_arr->field_off[i] = map->timer_off;
+ off_arr->field_sz[i] = sizeof(struct bpf_timer);
+ off_arr->cnt++;
+ }
+ if (has_kptrs) {
+ struct bpf_map_value_off *tab = map->kptr_off_tab;
+ u32 *off = &off_arr->field_off[off_arr->cnt];
+ u8 *sz = &off_arr->field_sz[off_arr->cnt];
+
+ for (i = 0; i < tab->nr_off; i++) {
+ *off++ = tab->off[i].offset;
+ *sz++ = sizeof(u64);
+ }
+ off_arr->cnt += tab->nr_off;
+ }
+
+ if (off_arr->cnt == 1)
+ return 0;
+ sort_r(off_arr->field_off, off_arr->cnt, sizeof(off_arr->field_off[0]),
+ map_off_arr_cmp, map_off_arr_swap, map);
+ return 0;
+}
+
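bpf_map_alloc_off_arr() above has to keep field_off[] and field_sz[] in lockstep while sorting by offset, which is why it hands sort_r() a custom swap callback: the comparison only sees the u32 offsets, and the swap uses priv to locate and mirror every exchange into the size array. A stripped-down, kernel-style sketch of the same idiom (off_arr_sketch and the helper names are illustrative):

#include <linux/sort.h>
#include <linux/types.h>

struct off_arr_sketch {
	u32 off[8];	/* sort key: field offsets */
	u8  sz[8];	/* kept in lockstep with off[] */
};

static int off_cmp(const void *a, const void *b, const void *priv)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : x > y;
}

static void off_swap(void *a, void *b, int size, const void *priv)
{
	struct off_arr_sketch *s = (struct off_arr_sketch *)priv;
	u32 *pa = a, *pb = b;

	/* positions of the two offsets inside off[], used to mirror the
	 * exchange into the parallel sz[] array
	 */
	swap(s->sz[pa - s->off], s->sz[pb - s->off]);
	swap(*pa, *pb);
}

/* usage: sort_r(s->off, n, sizeof(s->off[0]), off_cmp, off_swap, s); */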
static int map_check_btf(struct bpf_map *map, const struct btf *btf,
u32 btf_key_id, u32 btf_value_id)
{
@@ -820,10 +1014,34 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
return -EOPNOTSUPP;
}
- if (map->ops->map_check_btf)
+ map->kptr_off_tab = btf_parse_kptrs(btf, value_type);
+ if (map_value_has_kptrs(map)) {
+ if (!bpf_capable()) {
+ ret = -EPERM;
+ goto free_map_tab;
+ }
+ if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
+ ret = -EACCES;
+ goto free_map_tab;
+ }
+ if (map->map_type != BPF_MAP_TYPE_HASH &&
+ map->map_type != BPF_MAP_TYPE_LRU_HASH &&
+ map->map_type != BPF_MAP_TYPE_ARRAY) {
+ ret = -EOPNOTSUPP;
+ goto free_map_tab;
+ }
+ }
+
+ if (map->ops->map_check_btf) {
ret = map->ops->map_check_btf(map, btf, key_type, value_type);
+ if (ret < 0)
+ goto free_map_tab;
+ }
return ret;
+free_map_tab:
+ bpf_map_free_kptr_off_tab(map);
+ return ret;
}
#define BPF_MAP_CREATE_LAST_FIELD map_extra
@@ -912,10 +1130,14 @@ static int map_create(union bpf_attr *attr)
attr->btf_vmlinux_value_type_id;
}
- err = security_bpf_map_alloc(map);
+ err = bpf_map_alloc_off_arr(map);
if (err)
goto free_map;
+ err = security_bpf_map_alloc(map);
+ if (err)
+ goto free_map_off_arr;
+
err = bpf_map_alloc_id(map);
if (err)
goto free_map_sec;
@@ -938,6 +1160,8 @@ static int map_create(union bpf_attr *attr)
free_map_sec:
security_bpf_map_free(map);
+free_map_off_arr:
+ kfree(map->off_arr);
free_map:
btf_put(map->btf);
map->ops->map_free(map);
@@ -1639,7 +1863,7 @@ static int map_freeze(const union bpf_attr *attr)
return PTR_ERR(map);
if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS ||
- map_value_has_timer(map)) {
+ map_value_has_timer(map) || map_value_has_kptrs(map)) {
fdput(f);
return -ENOTSUPP;
}
@@ -3030,66 +3254,45 @@ static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *pro
}
#endif /* CONFIG_PERF_EVENTS */
-#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
-
-static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
+static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
+ const char __user *user_tp_name)
{
struct bpf_link_primer link_primer;
struct bpf_raw_tp_link *link;
struct bpf_raw_event_map *btp;
- struct bpf_prog *prog;
const char *tp_name;
char buf[128];
int err;
- if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
- return -EINVAL;
-
- prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
-
switch (prog->type) {
case BPF_PROG_TYPE_TRACING:
case BPF_PROG_TYPE_EXT:
case BPF_PROG_TYPE_LSM:
- if (attr->raw_tracepoint.name) {
+ if (user_tp_name)
/* The attach point for this category of programs
* should be specified via btf_id during program load.
*/
- err = -EINVAL;
- goto out_put_prog;
- }
+ return -EINVAL;
if (prog->type == BPF_PROG_TYPE_TRACING &&
prog->expected_attach_type == BPF_TRACE_RAW_TP) {
tp_name = prog->aux->attach_func_name;
break;
}
- err = bpf_tracing_prog_attach(prog, 0, 0);
- if (err >= 0)
- return err;
- goto out_put_prog;
+ return bpf_tracing_prog_attach(prog, 0, 0);
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
- if (strncpy_from_user(buf,
- u64_to_user_ptr(attr->raw_tracepoint.name),
- sizeof(buf) - 1) < 0) {
- err = -EFAULT;
- goto out_put_prog;
- }
+ if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
+ return -EFAULT;
buf[sizeof(buf) - 1] = 0;
tp_name = buf;
break;
default:
- err = -EINVAL;
- goto out_put_prog;
+ return -EINVAL;
}
btp = bpf_get_raw_tracepoint(tp_name);
- if (!btp) {
- err = -ENOENT;
- goto out_put_prog;
- }
+ if (!btp)
+ return -ENOENT;
link = kzalloc(sizeof(*link), GFP_USER);
if (!link) {
@@ -3116,11 +3319,29 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
out_put_btp:
bpf_put_raw_tracepoint(btp);
-out_put_prog:
- bpf_prog_put(prog);
return err;
}
+#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
+
+static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
+{
+ struct bpf_prog *prog;
+ int fd;
+
+ if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
+ return -EINVAL;
+
+ prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
+ if (fd < 0)
+ bpf_prog_put(prog);
+ return fd;
+}
+
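The refactor above moves the attach logic into bpf_raw_tp_link_attach() so that BPF_LINK_CREATE can reuse it (with a NULL name for BPF_TRACE_RAW_TP programs), while BPF_RAW_TRACEPOINT_OPEN keeps its existing userspace contract. A bare-syscall sketch of the legacy path; raw_tp_open() is just a local helper here and prog_fd is assumed to be a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int raw_tp_open(int prog_fd, const char *tp_name)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	/* Returns a link FD; the program stays attached until it is closed. */
	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}

/* e.g. raw_tp_open(prog_fd, "sched_switch"); for BPF_PROG_TYPE_TRACING
 * programs with expected_attach_type BPF_TRACE_RAW_TP, pass a NULL name --
 * the attach point was already fixed by btf_id at load time.
 */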
static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
enum bpf_attach_type attach_type)
{
@@ -3189,7 +3410,13 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
case BPF_CGROUP_SETSOCKOPT:
return BPF_PROG_TYPE_CGROUP_SOCKOPT;
case BPF_TRACE_ITER:
+ case BPF_TRACE_RAW_TP:
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
return BPF_PROG_TYPE_TRACING;
+ case BPF_LSM_MAC:
+ return BPF_PROG_TYPE_LSM;
case BPF_SK_LOOKUP:
return BPF_PROG_TYPE_SK_LOOKUP;
case BPF_XDP:
@@ -4246,21 +4473,6 @@ err_put:
return err;
}
-static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
- struct bpf_prog *prog)
-{
- if (attr->link_create.attach_type != prog->expected_attach_type)
- return -EINVAL;
-
- if (prog->expected_attach_type == BPF_TRACE_ITER)
- return bpf_iter_link_attach(attr, uattr, prog);
- else if (prog->type == BPF_PROG_TYPE_EXT)
- return bpf_tracing_prog_attach(prog,
- attr->link_create.target_fd,
- attr->link_create.target_btf_id);
- return -EINVAL;
-}
-
#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
@@ -4282,15 +4494,13 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
switch (prog->type) {
case BPF_PROG_TYPE_EXT:
- ret = tracing_bpf_link_attach(attr, uattr, prog);
- goto out;
+ break;
case BPF_PROG_TYPE_PERF_EVENT:
case BPF_PROG_TYPE_TRACEPOINT:
if (attr->link_create.attach_type != BPF_PERF_EVENT) {
ret = -EINVAL;
goto out;
}
- ptype = prog->type;
break;
case BPF_PROG_TYPE_KPROBE:
if (attr->link_create.attach_type != BPF_PERF_EVENT &&
@@ -4298,7 +4508,6 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
ret = -EINVAL;
goto out;
}
- ptype = prog->type;
break;
default:
ptype = attach_type_to_prog_type(attr->link_create.attach_type);
@@ -4309,7 +4518,7 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
break;
}
- switch (ptype) {
+ switch (prog->type) {
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
@@ -4319,8 +4528,25 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
ret = cgroup_bpf_link_attach(attr, prog);
break;
+ case BPF_PROG_TYPE_EXT:
+ ret = bpf_tracing_prog_attach(prog,
+ attr->link_create.target_fd,
+ attr->link_create.target_btf_id);
+ break;
+ case BPF_PROG_TYPE_LSM:
case BPF_PROG_TYPE_TRACING:
- ret = tracing_bpf_link_attach(attr, uattr, prog);
+ if (attr->link_create.attach_type != prog->expected_attach_type) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
+ ret = bpf_raw_tp_link_attach(prog, NULL);
+ else if (prog->expected_attach_type == BPF_TRACE_ITER)
+ ret = bpf_iter_link_attach(attr, uattr, prog);
+ else
+ ret = bpf_tracing_prog_attach(prog,
+ attr->link_create.target_fd,
+ attr->link_create.target_btf_id);
break;
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_SK_LOOKUP:
@@ -4908,3 +5134,90 @@ const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
const struct bpf_prog_ops bpf_syscall_prog_ops = {
.test_run = bpf_prog_test_run_syscall,
};
+
+#ifdef CONFIG_SYSCTL
+static int bpf_stats_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct static_key *key = (struct static_key *)table->data;
+ static int saved_val;
+ int val, ret;
+ struct ctl_table tmp = {
+ .data = &val,
+ .maxlen = sizeof(val),
+ .mode = table->mode,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ };
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&bpf_stats_enabled_mutex);
+ val = saved_val;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret && val != saved_val) {
+ if (val)
+ static_key_slow_inc(key);
+ else
+ static_key_slow_dec(key);
+ saved_val = val;
+ }
+ mutex_unlock(&bpf_stats_enabled_mutex);
+ return ret;
+}
+
+void __weak unpriv_ebpf_notify(int new_state)
+{
+}
+
+static int bpf_unpriv_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, unpriv_enable = *(int *)table->data;
+ bool locked_state = unpriv_enable == 1;
+ struct ctl_table tmp = *table;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ tmp.data = &unpriv_enable;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret) {
+ if (locked_state && unpriv_enable != 1)
+ return -EPERM;
+ *(int *)table->data = unpriv_enable;
+ }
+
+ unpriv_ebpf_notify(unpriv_enable);
+
+ return ret;
+}
+
+static struct ctl_table bpf_syscall_table[] = {
+ {
+ .procname = "unprivileged_bpf_disabled",
+ .data = &sysctl_unprivileged_bpf_disabled,
+ .maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
+ .mode = 0644,
+ .proc_handler = bpf_unpriv_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "bpf_stats_enabled",
+ .data = &bpf_stats_enabled_key.key,
+ .maxlen = sizeof(bpf_stats_enabled_key),
+ .mode = 0644,
+ .proc_handler = bpf_stats_handler,
+ },
+ { }
+};
+
+static int __init bpf_syscall_sysctl_init(void)
+{
+ register_sysctl_init("kernel", bpf_syscall_table);
+ return 0;
+}
+late_initcall(bpf_syscall_sysctl_init);
+#endif /* CONFIG_SYSCTL */
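With the two BPF sysctls now registered from syscall.c via register_sysctl_init() instead of kernel/sysctl.c, nothing changes for userspace: both knobs still appear under /proc/sys/kernel. A small sketch of flipping bpf_stats_enabled from C, equivalent to `sysctl -w kernel.bpf_stats_enabled=1` (set_bpf_stats() is a local helper; CAP_SYS_ADMIN is required, as enforced by bpf_stats_handler() above):

#include <fcntl.h>
#include <unistd.h>

static int set_bpf_stats(int enable)
{
	char buf[2] = { enable ? '1' : '0', '\n' };
	int fd, ret;

	fd = open("/proc/sys/kernel/bpf_stats_enabled", O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, buf, sizeof(buf)) == sizeof(buf) ? 0 : -1;
	close(fd);
	return ret;
}

/* Note: unprivileged_bpf_disabled accepts 0, 1 or 2, but once it has been
 * set to 1 the handler above refuses any change away from 1.
 */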
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index d94696198ef8..8c921799def4 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -99,7 +99,6 @@ static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
if (!prog)
return 0;
- meta.seq = seq;
ctx.meta = &meta;
ctx.task = task;
return bpf_iter_run_prog(prog, &ctx);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d175b70067b3..813f6ee80419 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -245,6 +245,7 @@ struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
bool pkt_access;
+ u8 release_regno;
int regno;
int access_size;
int mem_size;
@@ -257,6 +258,7 @@ struct bpf_call_arg_meta {
struct btf *ret_btf;
u32 ret_btf_id;
u32 subprogno;
+ struct bpf_map_value_off_desc *kptr_off_desc;
};
struct btf *btf_vmlinux;
@@ -471,17 +473,6 @@ static bool type_may_be_null(u32 type)
return type & PTR_MAYBE_NULL;
}
-/* Determine whether the function releases some resources allocated by another
- * function call. The first reference type argument will be assumed to be
- * released by release_reference().
- */
-static bool is_release_function(enum bpf_func_id func_id)
-{
- return func_id == BPF_FUNC_sk_release ||
- func_id == BPF_FUNC_ringbuf_submit ||
- func_id == BPF_FUNC_ringbuf_discard;
-}
-
static bool may_be_acquire_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_lookup_tcp ||
@@ -499,7 +490,8 @@ static bool is_acquire_function(enum bpf_func_id func_id,
if (func_id == BPF_FUNC_sk_lookup_tcp ||
func_id == BPF_FUNC_sk_lookup_udp ||
func_id == BPF_FUNC_skc_lookup_tcp ||
- func_id == BPF_FUNC_ringbuf_reserve)
+ func_id == BPF_FUNC_ringbuf_reserve ||
+ func_id == BPF_FUNC_kptr_xchg)
return true;
if (func_id == BPF_FUNC_map_lookup_elem &&
@@ -575,6 +567,8 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
strncpy(prefix, "user_", 32);
if (type & MEM_PERCPU)
strncpy(prefix, "percpu_", 32);
+ if (type & PTR_UNTRUSTED)
+ strncpy(prefix, "untrusted_", 32);
snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
prefix, str[base_type(type)], postfix);
@@ -3211,7 +3205,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
return 0;
}
-enum stack_access_src {
+enum bpf_access_src {
ACCESS_DIRECT = 1, /* the access is performed by an instruction */
ACCESS_HELPER = 2, /* the access is performed by a helper */
};
@@ -3219,7 +3213,7 @@ enum stack_access_src {
static int check_stack_range_initialized(struct bpf_verifier_env *env,
int regno, int off, int access_size,
bool zero_size_allowed,
- enum stack_access_src type,
+ enum bpf_access_src type,
struct bpf_call_arg_meta *meta);
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
@@ -3469,9 +3463,175 @@ static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
return 0;
}
+static int __check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno,
+ bool fixed_off_ok)
+{
+ /* Access to this pointer-typed register or passing it to a helper
+ * is only allowed in its original, unmodified form.
+ */
+
+ if (reg->off < 0) {
+ verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
+ reg_type_str(env, reg->type), regno, reg->off);
+ return -EACCES;
+ }
+
+ if (!fixed_off_ok && reg->off) {
+ verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
+ reg_type_str(env, reg->type), regno, reg->off);
+ return -EACCES;
+ }
+
+ if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+ verbose(env, "variable %s access var_off=%s disallowed\n",
+ reg_type_str(env, reg->type), tn_buf);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+int check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno)
+{
+ return __check_ptr_off_reg(env, reg, regno, false);
+}
+
+static int map_kptr_match_type(struct bpf_verifier_env *env,
+ struct bpf_map_value_off_desc *off_desc,
+ struct bpf_reg_state *reg, u32 regno)
+{
+ const char *targ_name = kernel_type_name(off_desc->kptr.btf, off_desc->kptr.btf_id);
+ int perm_flags = PTR_MAYBE_NULL;
+ const char *reg_name = "";
+
+ /* Only unreferenced case accepts untrusted pointers */
+ if (off_desc->type == BPF_KPTR_UNREF)
+ perm_flags |= PTR_UNTRUSTED;
+
+ if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
+ goto bad_type;
+
+ if (!btf_is_kernel(reg->btf)) {
+ verbose(env, "R%d must point to kernel BTF\n", regno);
+ return -EINVAL;
+ }
+ /* We need to verify reg->type and reg->btf, before accessing reg->btf */
+ reg_name = kernel_type_name(reg->btf, reg->btf_id);
+
+ /* For ref_ptr case, release function check should ensure we get one
+ * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
+ * normal store of unreferenced kptr, we must ensure var_off is zero.
+ * Since ref_ptr cannot be accessed directly by BPF insns, checks for
+ * reg->off and reg->ref_obj_id are not needed here.
+ */
+ if (__check_ptr_off_reg(env, reg, regno, true))
+ return -EACCES;
+
+ /* A full type match is needed, as BTF can be vmlinux or module BTF, and
+ * we also need to take into account the reg->off.
+ *
+ * We want to support cases like:
+ *
+ * struct foo {
+ * struct bar br;
+ * struct baz bz;
+ * };
+ *
+ * struct foo *v;
+ * v = func(); // PTR_TO_BTF_ID
+ * val->foo = v; // reg->off is zero, btf and btf_id match type
+ * val->bar = &v->br; // reg->off is still zero, but we need to retry with
+ * // first member type of struct after comparison fails
+ * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
+ * // to match type
+ *
+ * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
+ * is zero. We must also ensure that btf_struct_ids_match does not walk
+ * the struct to match type against first member of struct, i.e. reject
+ * second case from above. Hence, when type is BPF_KPTR_REF, we set
+ * strict mode to true for type match.
+ */
+ if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
+ off_desc->kptr.btf, off_desc->kptr.btf_id,
+ off_desc->type == BPF_KPTR_REF))
+ goto bad_type;
+ return 0;
+bad_type:
+ verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
+ reg_type_str(env, reg->type), reg_name);
+ verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
+ if (off_desc->type == BPF_KPTR_UNREF)
+ verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
+ targ_name);
+ else
+ verbose(env, "\n");
+ return -EINVAL;
+}
+
+static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
+ int value_regno, int insn_idx,
+ struct bpf_map_value_off_desc *off_desc)
+{
+ struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
+ int class = BPF_CLASS(insn->code);
+ struct bpf_reg_state *val_reg;
+
+ /* Things we already checked for in check_map_access and caller:
+ * - Reject cases where variable offset may touch kptr
+ * - size of access (must be BPF_DW)
+ * - tnum_is_const(reg->var_off)
+ * - off_desc->offset == off + reg->var_off.value
+ */
+ /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
+ if (BPF_MODE(insn->code) != BPF_MEM) {
+ verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
+ return -EACCES;
+ }
+
+ /* We only allow loading referenced kptr, since it will be marked as
+ * untrusted, similar to unreferenced kptr.
+ */
+ if (class != BPF_LDX && off_desc->type == BPF_KPTR_REF) {
+ verbose(env, "store to referenced kptr disallowed\n");
+ return -EACCES;
+ }
+
+ if (class == BPF_LDX) {
+ val_reg = reg_state(env, value_regno);
+ /* We can simply mark the value_regno receiving the pointer
+ * value from map as PTR_TO_BTF_ID, with the correct type.
+ */
+ mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, off_desc->kptr.btf,
+ off_desc->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED);
+ /* For mark_ptr_or_null_reg */
+ val_reg->id = ++env->id_gen;
+ } else if (class == BPF_STX) {
+ val_reg = reg_state(env, value_regno);
+ if (!register_is_null(val_reg) &&
+ map_kptr_match_type(env, off_desc, val_reg, value_regno))
+ return -EACCES;
+ } else if (class == BPF_ST) {
+ if (insn->imm) {
+ verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
+ off_desc->offset);
+ return -EACCES;
+ }
+ } else {
+ verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
+ return -EACCES;
+ }
+ return 0;
+}
+
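check_map_kptr_access() above is what direct loads and stores to an unreferenced kptr field go through. A sketch of the access rules from the program side, reusing the assumed __kptr tag spelling from the earlier sketch; the map and function names are illustrative:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

#define __kptr __attribute__((btf_type_tag("kptr")))	/* assumed spelling */

struct map_value {
	struct task_struct __kptr *unref_task;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} arr SEC(".maps");

SEC("tc")
int touch_unref_kptr(struct __sk_buff *ctx)
{
	struct task_struct *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&arr, &key);
	if (!v)
		return 0;

	/* Full 8-byte load of the slot is allowed; the result is marked
	 * PTR_TO_BTF_ID | PTR_MAYBE_NULL | PTR_UNTRUSTED, so it can be read
	 * but not passed where a trusted pointer is required.
	 */
	p = v->unref_task;

	/* Stores must be NULL or a pointer whose BTF type matches the slot;
	 * partial, misaligned or variable-offset accesses are rejected.
	 */
	v->unref_task = p;
	v->unref_task = NULL;
	return 0;
}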
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
- int off, int size, bool zero_size_allowed)
+ int off, int size, bool zero_size_allowed,
+ enum bpf_access_src src)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
@@ -3507,6 +3667,36 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
return -EACCES;
}
}
+ if (map_value_has_kptrs(map)) {
+ struct bpf_map_value_off *tab = map->kptr_off_tab;
+ int i;
+
+ for (i = 0; i < tab->nr_off; i++) {
+ u32 p = tab->off[i].offset;
+
+ if (reg->smin_value + off < p + sizeof(u64) &&
+ p < reg->umax_value + off + size) {
+ if (src != ACCESS_DIRECT) {
+ verbose(env, "kptr cannot be accessed indirectly by helper\n");
+ return -EACCES;
+ }
+ if (!tnum_is_const(reg->var_off)) {
+ verbose(env, "kptr access cannot have variable offset\n");
+ return -EACCES;
+ }
+ if (p != off + reg->var_off.value) {
+ verbose(env, "kptr access misaligned expected=%u off=%llu\n",
+ p, off + reg->var_off.value);
+ return -EACCES;
+ }
+ if (size != bpf_size_to_bytes(BPF_DW)) {
+ verbose(env, "kptr access size must be BPF_DW\n");
+ return -EACCES;
+ }
+ break;
+ }
+ }
+ }
return err;
}
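The kptr check added to check_map_access() above boils down to a half-open interval overlap test between the possible access range [smin_value + off, umax_value + off + size) and the kptr slot [p, p + 8). A generic sketch of that predicate, purely for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Two half-open ranges [a_start, a_end) and [b_start, b_end) overlap iff
 * each one starts before the other ends -- the same shape as the
 * smin/umax test above.
 */
static bool ranges_overlap(int64_t a_start, int64_t a_end,
			   int64_t b_start, int64_t b_end)
{
	return a_start < b_end && b_start < a_end;
}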
@@ -3980,44 +4170,6 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
}
#endif
-static int __check_ptr_off_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno,
- bool fixed_off_ok)
-{
- /* Access to this pointer-typed register or passing it to a helper
- * is only allowed in its original, unmodified form.
- */
-
- if (reg->off < 0) {
- verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
- reg_type_str(env, reg->type), regno, reg->off);
- return -EACCES;
- }
-
- if (!fixed_off_ok && reg->off) {
- verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
- reg_type_str(env, reg->type), regno, reg->off);
- return -EACCES;
- }
-
- if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
- char tn_buf[48];
-
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "variable %s access var_off=%s disallowed\n",
- reg_type_str(env, reg->type), tn_buf);
- return -EACCES;
- }
-
- return 0;
-}
-
-int check_ptr_off_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno)
-{
- return __check_ptr_off_reg(env, reg, regno, false);
-}
-
static int __check_buffer_access(struct bpf_verifier_env *env,
const char *buf_info,
const struct bpf_reg_state *reg,
@@ -4224,6 +4376,12 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
if (ret < 0)
return ret;
+ /* If this is an untrusted pointer, all pointers formed by walking it
+ * also inherit the untrusted flag.
+ */
+ if (type_flag(reg->type) & PTR_UNTRUSTED)
+ flag |= PTR_UNTRUSTED;
+
if (atype == BPF_READ && value_regno >= 0)
mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
@@ -4316,7 +4474,7 @@ static int check_stack_slot_within_bounds(int off,
static int check_stack_access_within_bounds(
struct bpf_verifier_env *env,
int regno, int off, int access_size,
- enum stack_access_src src, enum bpf_access_type type)
+ enum bpf_access_src src, enum bpf_access_type type)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
@@ -4412,6 +4570,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_MAP_VALUE) {
+ struct bpf_map_value_off_desc *kptr_off_desc = NULL;
+
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into map\n", value_regno);
@@ -4420,8 +4580,16 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
err = check_map_access_type(env, regno, off, size, t);
if (err)
return err;
- err = check_map_access(env, regno, off, size, false);
- if (!err && t == BPF_READ && value_regno >= 0) {
+ err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
+ if (err)
+ return err;
+ if (tnum_is_const(reg->var_off))
+ kptr_off_desc = bpf_map_kptr_off_contains(reg->map_ptr,
+ off + reg->var_off.value);
+ if (kptr_off_desc) {
+ err = check_map_kptr_access(env, regno, value_regno, insn_idx,
+ kptr_off_desc);
+ } else if (t == BPF_READ && value_regno >= 0) {
struct bpf_map *map = reg->map_ptr;
/* if map is read-only, track its contents as scalars */
@@ -4724,7 +4892,7 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
static int check_stack_range_initialized(
struct bpf_verifier_env *env, int regno, int off,
int access_size, bool zero_size_allowed,
- enum stack_access_src type, struct bpf_call_arg_meta *meta)
+ enum bpf_access_src type, struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_func_state *state = func(env, reg);
@@ -4861,6 +5029,11 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
return check_packet_access(env, regno, reg->off, access_size,
zero_size_allowed);
case PTR_TO_MAP_KEY:
+ if (meta && meta->raw_mode) {
+ verbose(env, "R%d cannot write into %s\n", regno,
+ reg_type_str(env, reg->type));
+ return -EACCES;
+ }
return check_mem_region_access(env, regno, reg->off, access_size,
reg->map_ptr->key_size, false);
case PTR_TO_MAP_VALUE:
@@ -4869,15 +5042,25 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
BPF_READ))
return -EACCES;
return check_map_access(env, regno, reg->off, access_size,
- zero_size_allowed);
+ zero_size_allowed, ACCESS_HELPER);
case PTR_TO_MEM:
+ if (type_is_rdonly_mem(reg->type)) {
+ if (meta && meta->raw_mode) {
+ verbose(env, "R%d cannot write into %s\n", regno,
+ reg_type_str(env, reg->type));
+ return -EACCES;
+ }
+ }
return check_mem_region_access(env, regno, reg->off,
access_size, reg->mem_size,
zero_size_allowed);
case PTR_TO_BUF:
if (type_is_rdonly_mem(reg->type)) {
- if (meta && meta->raw_mode)
+ if (meta && meta->raw_mode) {
+ verbose(env, "R%d cannot write into %s\n", regno,
+ reg_type_str(env, reg->type));
return -EACCES;
+ }
max_access = &env->prog->aux->max_rdonly_access;
} else {
@@ -4919,8 +5102,7 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
* out. Only upper bounds can be learned because retval is an
* int type and negative retvals are allowed.
*/
- if (meta)
- meta->msize_max_value = reg->umax_value;
+ meta->msize_max_value = reg->umax_value;
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
@@ -4963,24 +5145,33 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno, u32 mem_size)
{
+ bool may_be_null = type_may_be_null(reg->type);
+ struct bpf_reg_state saved_reg;
+ struct bpf_call_arg_meta meta;
+ int err;
+
if (register_is_null(reg))
return 0;
- if (type_may_be_null(reg->type)) {
- /* Assuming that the register contains a value check if the memory
- * access is safe. Temporarily save and restore the register's state as
- * the conversion shouldn't be visible to a caller.
- */
- const struct bpf_reg_state saved_reg = *reg;
- int rv;
-
+ memset(&meta, 0, sizeof(meta));
+ /* Assuming that the register contains a value check if the memory
+ * access is safe. Temporarily save and restore the register's state as
+ * the conversion shouldn't be visible to a caller.
+ */
+ if (may_be_null) {
+ saved_reg = *reg;
mark_ptr_not_null_reg(reg);
- rv = check_helper_mem_access(env, regno, mem_size, true, NULL);
- *reg = saved_reg;
- return rv;
}
- return check_helper_mem_access(env, regno, mem_size, true, NULL);
+ err = check_helper_mem_access(env, regno, mem_size, true, &meta);
+ /* Check access for BPF_WRITE */
+ meta.raw_mode = true;
+ err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
+
+ if (may_be_null)
+ *reg = saved_reg;
+
+ return err;
}
int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
@@ -4989,16 +5180,22 @@ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state
struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
bool may_be_null = type_may_be_null(mem_reg->type);
struct bpf_reg_state saved_reg;
+ struct bpf_call_arg_meta meta;
int err;
WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
+ memset(&meta, 0, sizeof(meta));
+
if (may_be_null) {
saved_reg = *mem_reg;
mark_ptr_not_null_reg(mem_reg);
}
- err = check_mem_size_reg(env, reg, regno, true, NULL);
+ err = check_mem_size_reg(env, reg, regno, true, &meta);
+ /* Check access for BPF_WRITE */
+ meta.raw_mode = true;
+ err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
if (may_be_null)
*mem_reg = saved_reg;
@@ -5134,6 +5331,53 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
return 0;
}
+static int process_kptr_func(struct bpf_verifier_env *env, int regno,
+ struct bpf_call_arg_meta *meta)
+{
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ struct bpf_map_value_off_desc *off_desc;
+ struct bpf_map *map_ptr = reg->map_ptr;
+ u32 kptr_off;
+ int ret;
+
+ if (!tnum_is_const(reg->var_off)) {
+ verbose(env,
+ "R%d doesn't have constant offset. kptr has to be at the constant offset\n",
+ regno);
+ return -EINVAL;
+ }
+ if (!map_ptr->btf) {
+ verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
+ map_ptr->name);
+ return -EINVAL;
+ }
+ if (!map_value_has_kptrs(map_ptr)) {
+ ret = PTR_ERR(map_ptr->kptr_off_tab);
+ if (ret == -E2BIG)
+ verbose(env, "map '%s' has more than %d kptr\n", map_ptr->name,
+ BPF_MAP_VALUE_OFF_MAX);
+ else if (ret == -EEXIST)
+ verbose(env, "map '%s' has repeating kptr BTF tags\n", map_ptr->name);
+ else
+ verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
+ return -EINVAL;
+ }
+
+ meta->map_ptr = map_ptr;
+ kptr_off = reg->off + reg->var_off.value;
+ off_desc = bpf_map_kptr_off_contains(map_ptr, kptr_off);
+ if (!off_desc) {
+ verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
+ return -EACCES;
+ }
+ if (off_desc->type != BPF_KPTR_REF) {
+ verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
+ return -EACCES;
+ }
+ meta->kptr_off_desc = off_desc;
+ return 0;
+}
+
static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
{
return base_type(type) == ARG_PTR_TO_MEM ||
@@ -5157,6 +5401,11 @@ static bool arg_type_is_int_ptr(enum bpf_arg_type type)
type == ARG_PTR_TO_LONG;
}
+static bool arg_type_is_release(enum bpf_arg_type type)
+{
+ return type & OBJ_RELEASE;
+}
+
static int int_ptr_type_to_size(enum bpf_arg_type type)
{
if (type == ARG_PTR_TO_INT)
@@ -5269,6 +5518,7 @@ static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
+static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
[ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
@@ -5296,11 +5546,13 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
[ARG_PTR_TO_STACK] = &stack_ptr_types,
[ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
[ARG_PTR_TO_TIMER] = &timer_types,
+ [ARG_PTR_TO_KPTR] = &kptr_types,
};
static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
- const u32 *arg_btf_id)
+ const u32 *arg_btf_id,
+ struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected, type = reg->type;
@@ -5345,6 +5597,13 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
found:
if (reg->type == PTR_TO_BTF_ID) {
+ /* For bpf_sk_release, it needs to match against first member
+ * 'struct sock_common', hence make an exception for it. This
+ * allows bpf_sk_release to work for multiple socket types.
+ */
+ bool strict_type_match = arg_type_is_release(arg_type) &&
+ meta->func_id != BPF_FUNC_sk_release;
+
if (!arg_btf_id) {
if (!compatible->btf_id) {
verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
@@ -5353,8 +5612,12 @@ found:
arg_btf_id = compatible->btf_id;
}
- if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
- btf_vmlinux, *arg_btf_id)) {
+ if (meta->func_id == BPF_FUNC_kptr_xchg) {
+ if (map_kptr_match_type(env, meta->kptr_off_desc, reg, regno))
+ return -EACCES;
+ } else if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
+ btf_vmlinux, *arg_btf_id,
+ strict_type_match)) {
verbose(env, "R%d is of type %s but %s is expected\n",
regno, kernel_type_name(reg->btf, reg->btf_id),
kernel_type_name(btf_vmlinux, *arg_btf_id));
@@ -5367,11 +5630,10 @@ found:
int check_func_arg_reg_off(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
- enum bpf_arg_type arg_type,
- bool is_release_func)
+ enum bpf_arg_type arg_type)
{
- bool fixed_off_ok = false, release_reg;
enum bpf_reg_type type = reg->type;
+ bool fixed_off_ok = false;
switch ((u32)type) {
case SCALAR_VALUE:
@@ -5389,7 +5651,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
/* Some of the argument types nevertheless require a
* zero register offset.
*/
- if (arg_type != ARG_PTR_TO_ALLOC_MEM)
+ if (base_type(arg_type) != ARG_PTR_TO_ALLOC_MEM)
return 0;
break;
/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
@@ -5397,19 +5659,17 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
*/
case PTR_TO_BTF_ID:
/* When referenced PTR_TO_BTF_ID is passed to release function,
- * it's fixed offset must be 0. We rely on the property that
- * only one referenced register can be passed to BPF helpers and
- * kfuncs. In the other cases, fixed offset can be non-zero.
+ * its fixed offset must be 0. In the other cases, the fixed offset
+ * can be non-zero.
*/
- release_reg = is_release_func && reg->ref_obj_id;
- if (release_reg && reg->off) {
+ if (arg_type_is_release(arg_type) && reg->off) {
verbose(env, "R%d must have zero offset when passed to release func\n",
regno);
return -EINVAL;
}
- /* For release_reg == true, fixed_off_ok must be false, but we
- * already checked and rejected reg->off != 0 above, so set to
- * true to allow fixed offset for all other cases.
+ /* When the argument is a release pointer, fixed_off_ok must be false,
+ * but reg->off != 0 was already checked and rejected above, so set it
+ * to true to allow a fixed offset for all other cases.
*/
fixed_off_ok = true;
break;
@@ -5464,18 +5724,28 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
*/
goto skip_type_check;
- err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
+ err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg], meta);
if (err)
return err;
- err = check_func_arg_reg_off(env, reg, regno, arg_type, is_release_function(meta->func_id));
+ err = check_func_arg_reg_off(env, reg, regno, arg_type);
if (err)
return err;
skip_type_check:
- /* check_func_arg_reg_off relies on only one referenced register being
- * allowed for BPF helpers.
- */
+ if (arg_type_is_release(arg_type)) {
+ if (!reg->ref_obj_id && !register_is_null(reg)) {
+ verbose(env, "R%d must be referenced when passed to release function\n",
+ regno);
+ return -EINVAL;
+ }
+ if (meta->release_regno) {
+ verbose(env, "verifier internal error: more than one release argument\n");
+ return -EFAULT;
+ }
+ meta->release_regno = regno;
+ }
+
if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
@@ -5613,7 +5883,8 @@ skip_type_check:
}
err = check_map_access(env, regno, reg->off,
- map->value_size - reg->off, false);
+ map->value_size - reg->off, false,
+ ACCESS_HELPER);
if (err)
return err;
@@ -5629,6 +5900,9 @@ skip_type_check:
verbose(env, "string is not zero-terminated\n");
return -EINVAL;
}
+ } else if (arg_type == ARG_PTR_TO_KPTR) {
+ if (process_kptr_func(env, regno, meta))
+ return -EACCES;
}
return err;
@@ -5971,17 +6245,18 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
int i;
for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
- if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
+ if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
return false;
- if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
+ if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
return false;
}
return true;
}
-static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
+static int check_func_proto(const struct bpf_func_proto *fn, int func_id,
+ struct bpf_call_arg_meta *meta)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
@@ -6665,7 +6940,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
- err = check_func_proto(fn, func_id);
+ err = check_func_proto(fn, func_id, &meta);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
@@ -6698,8 +6973,17 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return err;
}
- if (is_release_function(func_id)) {
- err = release_reference(env, meta.ref_obj_id);
+ regs = cur_regs(env);
+
+ if (meta.release_regno) {
+ err = -EINVAL;
+ if (meta.ref_obj_id)
+ err = release_reference(env, meta.ref_obj_id);
+ /* meta.ref_obj_id can only be 0 if the register that is meant to be
+ * released is NULL, and its regno must be > R0.
+ */
+ else if (register_is_null(&regs[meta.release_regno]))
+ err = 0;
if (err) {
verbose(env, "func %s#%d reference has not been acquired before\n",
func_id_name(func_id), func_id);
@@ -6707,8 +6991,6 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
}
}
- regs = cur_regs(env);
-
switch (func_id) {
case BPF_FUNC_tail_call:
err = check_reference_leak(env);
@@ -6832,21 +7114,25 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
regs[BPF_REG_0].btf_id = meta.ret_btf_id;
}
} else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
+ struct btf *ret_btf;
int ret_btf_id;
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
- ret_btf_id = *fn->ret_btf_id;
+ if (func_id == BPF_FUNC_kptr_xchg) {
+ ret_btf = meta.kptr_off_desc->kptr.btf;
+ ret_btf_id = meta.kptr_off_desc->kptr.btf_id;
+ } else {
+ ret_btf = btf_vmlinux;
+ ret_btf_id = *fn->ret_btf_id;
+ }
if (ret_btf_id == 0) {
verbose(env, "invalid return type %u of func %s#%d\n",
base_type(ret_type), func_id_name(func_id),
func_id);
return -EINVAL;
}
- /* current BPF helper definitions are only coming from
- * built-in code with type IDs from vmlinux BTF
- */
- regs[BPF_REG_0].btf = btf_vmlinux;
+ regs[BPF_REG_0].btf = ret_btf;
regs[BPF_REG_0].btf_id = ret_btf_id;
} else {
verbose(env, "unknown return type %u of func %s#%d\n",
@@ -7433,7 +7719,7 @@ static int sanitize_check_bounds(struct bpf_verifier_env *env,
return -EACCES;
break;
case PTR_TO_MAP_VALUE:
- if (check_map_access(env, dst, dst_reg->off, 1, false)) {
+ if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
verbose(env, "R%d pointer arithmetic of map value goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
@@ -12822,7 +13108,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
if (!ctx_access)
continue;
- switch (env->insn_aux_data[i + delta].ptr_type) {
+ switch ((int)env->insn_aux_data[i + delta].ptr_type) {
case PTR_TO_CTX:
if (!ops->convert_ctx_access)
continue;
@@ -12839,6 +13125,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
break;
case PTR_TO_BTF_ID:
+ case PTR_TO_BTF_ID | PTR_UNTRUSTED:
if (type == BPF_READ) {
insn->code = BPF_LDX | BPF_PROBE_MEM |
BPF_SIZE((insn)->code);
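The verifier pieces above (ARG_PTR_TO_KPTR handling, the kptr_xchg return typing, and the release-argument tracking) come together in a BPF program roughly like the sketch below, modelled on the selftests that accompany this series. The __kptr_ref tag spelling, the extern kfunc declarations, and the availability of bpf_kptr_xchg() in the generated helper header are assumptions here, not something this hunk guarantees.

/* Hedged sketch, not part of this patch: store a referenced kptr into a map
 * value and release whatever was stored there before.  Assumes a vmlinux.h
 * that carries struct prog_test_ref_kfunc and a libbpf new enough to expose
 * bpf_kptr_xchg() in bpf_helper_defs.h.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))

extern struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;

struct map_value {
	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} array_map SEC(".maps");

SEC("tc")
int kptr_xchg_example(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	unsigned long sp = 0;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	/* Acquire kfunc returns a referenced PTR_TO_BTF_ID (may be NULL). */
	p = bpf_kfunc_call_test_acquire(&sp);
	if (!p)
		return 0;

	/* Referenced kptrs may only be moved in and out of the map value
	 * with bpf_kptr_xchg(); the previous pointer comes back and must be
	 * released (or stored again) before the program exits.
	 */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p)
		bpf_kfunc_call_test_release(p);
	return 0;
}

char _license[] SEC("license") = "GPL";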
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 830aaf8ca08e..47139877f62d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -62,7 +62,6 @@
#include <linux/binfmts.h>
#include <linux/sched/sysctl.h>
#include <linux/kexec.h>
-#include <linux/bpf.h>
#include <linux/mount.h>
#include <linux/userfaultfd_k.h>
#include <linux/latencytop.h>
@@ -148,66 +147,6 @@ static const int max_extfrag_threshold = 1000;
#endif /* CONFIG_SYSCTL */
-#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
-static int bpf_stats_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct static_key *key = (struct static_key *)table->data;
- static int saved_val;
- int val, ret;
- struct ctl_table tmp = {
- .data = &val,
- .maxlen = sizeof(val),
- .mode = table->mode,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- };
-
- if (write && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- mutex_lock(&bpf_stats_enabled_mutex);
- val = saved_val;
- ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
- if (write && !ret && val != saved_val) {
- if (val)
- static_key_slow_inc(key);
- else
- static_key_slow_dec(key);
- saved_val = val;
- }
- mutex_unlock(&bpf_stats_enabled_mutex);
- return ret;
-}
-
-void __weak unpriv_ebpf_notify(int new_state)
-{
-}
-
-static int bpf_unpriv_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- int ret, unpriv_enable = *(int *)table->data;
- bool locked_state = unpriv_enable == 1;
- struct ctl_table tmp = *table;
-
- if (write && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- tmp.data = &unpriv_enable;
- ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
- if (write && !ret) {
- if (locked_state && unpriv_enable != 1)
- return -EPERM;
- *(int *)table->data = unpriv_enable;
- }
-
- unpriv_ebpf_notify(unpriv_enable);
-
- return ret;
-}
-#endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
-
/*
* /proc/sys support
*/
@@ -2299,24 +2238,6 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
-#ifdef CONFIG_BPF_SYSCALL
- {
- .procname = "unprivileged_bpf_disabled",
- .data = &sysctl_unprivileged_bpf_disabled,
- .maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
- .mode = 0644,
- .proc_handler = bpf_unpriv_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_TWO,
- },
- {
- .procname = "bpf_stats_enabled",
- .data = &bpf_stats_enabled_key.key,
- .maxlen = sizeof(bpf_stats_enabled_key),
- .mode = 0644,
- .proc_handler = bpf_stats_handler,
- },
-#endif
#if defined(CONFIG_TREE_RCU)
{
.procname = "panic_on_rcu_stall",
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d8553f46caa2..f15b826f9899 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -129,7 +129,10 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
* out of events when it was updated in between this and the
* rcu_dereference() which is accepted risk.
*/
- ret = BPF_PROG_RUN_ARRAY(call->prog_array, ctx, bpf_prog_run);
+ rcu_read_lock();
+ ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
+ ctx, bpf_prog_run);
+ rcu_read_unlock();
out:
__this_cpu_dec(bpf_prog_active);
@@ -2254,15 +2257,13 @@ static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void
const struct bpf_kprobe_multi_link *link = priv;
unsigned long *addr_a = a, *addr_b = b;
u64 *cookie_a, *cookie_b;
- unsigned long tmp1;
- u64 tmp2;
cookie_a = link->cookies + (addr_a - link->addrs);
cookie_b = link->cookies + (addr_b - link->addrs);
/* swap addr_a/addr_b and cookie_a/cookie_b values */
- tmp1 = *addr_a; *addr_a = *addr_b; *addr_b = tmp1;
- tmp2 = *cookie_a; *cookie_a = *cookie_b; *cookie_b = tmp2;
+ swap(*addr_a, *addr_b);
+ swap(*cookie_a, *cookie_b);
}
static int __bpf_kprobe_multi_cookie_cmp(const void *a, const void *b)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0c5cb2d6436a..2a7836e115b4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -53,6 +53,7 @@
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
#define FLAG_VERIFIER_ZEXT BIT(3)
+#define FLAG_LARGE_MEM BIT(4)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -7838,7 +7839,7 @@ static struct bpf_test tests[] = {
},
/* BPF_LDX_MEM B/H/W/DW */
{
- "BPF_LDX_MEM | BPF_B",
+ "BPF_LDX_MEM | BPF_B, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000008ULL),
@@ -7878,7 +7879,56 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_H",
+ "BPF_LDX_MEM | BPF_B, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_B, R1, R2, -256),
+ BPF_LDX_MEM(BPF_B, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 256),
+ BPF_LDX_MEM(BPF_B, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 4096),
+ BPF_LDX_MEM(BPF_B, R0, R1, 4096),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 4096 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000708ULL),
@@ -7918,7 +7968,72 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_W",
+ "BPF_LDX_MEM | BPF_H, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_H, R1, R2, -256),
+ BPF_LDX_MEM(BPF_H, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 256),
+ BPF_LDX_MEM(BPF_H, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 8192),
+ BPF_LDX_MEM(BPF_H, R0, R1, 8192),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 8192 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 13),
+ BPF_LDX_MEM(BPF_H, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000005060708ULL),
@@ -7957,6 +8072,162 @@ static struct bpf_test tests[] = {
{ { 0, 0 } },
.stack_depth = 8,
},
+ {
+ "BPF_LDX_MEM | BPF_W, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_W, R1, R2, -256),
+ BPF_LDX_MEM(BPF_W, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 256),
+ BPF_LDX_MEM(BPF_W, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 16384),
+ BPF_LDX_MEM(BPF_W, R0, R1, 16384),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 16384 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 13),
+ BPF_LDX_MEM(BPF_W, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_DW, R1, R2, -256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 32760),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32768, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 13),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -14094,6 +14365,9 @@ static void *generate_test_data(struct bpf_test *test, int sub)
if (test->aux & FLAG_NO_DATA)
return NULL;
+ if (test->aux & FLAG_LARGE_MEM)
+ return kmalloc(test->test[sub].data_size, GFP_KERNEL);
+
/* Test case expects an skb, so populate one. Various
* subtests generate skbs of different sizes based on
* the same data.
@@ -14137,7 +14411,10 @@ static void release_test_data(const struct bpf_test *test, void *data)
if (test->aux & FLAG_NO_DATA)
return;
- kfree_skb(data);
+ if (test->aux & FLAG_LARGE_MEM)
+ kfree(data);
+ else
+ kfree_skb(data);
}
static int filter_length(int which)
@@ -14674,6 +14951,36 @@ static struct tail_call_test tail_call_tests[] = {
.result = 10,
},
{
+ "Tail call load/store leaf",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP),
+ BPF_STX_MEM(BPF_DW, R3, R1, -8),
+ BPF_STX_MEM(BPF_DW, R3, R2, -16),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 3),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 32,
+ },
+ {
+ "Tail call load/store",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 3),
+ BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 16,
+ },
+ {
"Tail call error path, max count reached",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index a5a3d6c27e1f..9a564971f539 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -38,6 +38,7 @@
static int i_zero;
static int i_one_hundred = 100;
+static int match_int_ok = 1;
struct test_sysctl_data {
int int_0001;
@@ -96,6 +97,13 @@ static struct ctl_table test_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "match_int",
+ .data = &match_int_ok,
+ .maxlen = sizeof(match_int_ok),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "boot_int",
.data = &test_data.boot_int,
.maxlen = sizeof(test_data.boot_int),
@@ -132,6 +140,30 @@ static struct ctl_table_header *test_sysctl_header;
static int __init test_sysctl_init(void)
{
+ int i;
+
+ struct {
+ int defined;
+ int wanted;
+ } match_int[] = {
+ {.defined = *(int *)SYSCTL_ZERO, .wanted = 0},
+ {.defined = *(int *)SYSCTL_ONE, .wanted = 1},
+ {.defined = *(int *)SYSCTL_TWO, .wanted = 2},
+ {.defined = *(int *)SYSCTL_THREE, .wanted = 3},
+ {.defined = *(int *)SYSCTL_FOUR, .wanted = 4},
+ {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100},
+ {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200},
+ {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000},
+ {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000},
+ {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX},
+ {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535},
+ {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(match_int); i++)
+ if (match_int[i].defined != match_int[i].wanted)
+ match_int_ok = 0;
+
test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
if (!test_data.bitmap_0001)
return -ENOMEM;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bf5736c1d458..a06f4d4a6f47 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1753,8 +1753,7 @@ static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int err = 0;
struct sk_buff *skb;
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
lock_sock(sk);
if (!skb)
diff --git a/net/atm/common.c b/net/atm/common.c
index 1cfa9bf1d187..f7019df41c3e 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -540,7 +540,7 @@ int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
+ skb = skb_recv_datagram(sk, flags, &error);
if (!skb)
return error;
@@ -553,7 +553,7 @@ int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
error = skb_copy_datagram_msg(skb, 0, msg, copied);
if (error)
return error;
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 363d47f94532..116481e4da82 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1669,8 +1669,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
/* Now we can treat all alike */
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (skb == NULL)
goto out;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index a0cb2e3da8d4..b506409bb498 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -251,7 +251,6 @@ EXPORT_SYMBOL(bt_accept_dequeue);
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
- int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
size_t copied;
@@ -263,7 +262,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
@@ -281,7 +280,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_reset_transport_header(skb);
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err == 0) {
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name && bt_sk(sk)->skb_msg_name)
bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
@@ -385,7 +384,7 @@ int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
copied += chunk;
size -= chunk;
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
int skb_len = skb_headlen(skb);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 33b3c0ffc339..189e3115c8c6 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1453,7 +1453,6 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
- int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
@@ -1470,7 +1469,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
if (sk->sk_state == BT_CLOSED)
return 0;
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
return err;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index af709c182674..8d54fef9a568 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -551,8 +551,13 @@ struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
return sk;
}
+struct prog_test_member1 {
+ int a;
+};
+
struct prog_test_member {
- u64 c;
+ struct prog_test_member1 m;
+ int c;
};
struct prog_test_ref_kfunc {
@@ -577,6 +582,12 @@ bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
return &prog_test_struct;
}
+noinline struct prog_test_member *
+bpf_kfunc_call_memb_acquire(void)
+{
+ return &prog_test_struct.memb;
+}
+
noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
}
@@ -585,6 +596,16 @@ noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}
+noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
+{
+}
+
+noinline struct prog_test_ref_kfunc *
+bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b)
+{
+ return &prog_test_struct;
+}
+
struct prog_test_pass1 {
int x0;
struct {
@@ -668,8 +689,11 @@ BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_ID(func, bpf_kfunc_call_test_acquire)
+BTF_ID(func, bpf_kfunc_call_memb_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
+BTF_ID(func, bpf_kfunc_call_memb1_release)
+BTF_ID(func, bpf_kfunc_call_test_kptr_get)
BTF_ID(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID(func, bpf_kfunc_call_test_pass1)
BTF_ID(func, bpf_kfunc_call_test_pass2)
@@ -683,17 +707,26 @@ BTF_SET_END(test_sk_check_kfunc_ids)
BTF_SET_START(test_sk_acquire_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
+BTF_ID(func, bpf_kfunc_call_memb_acquire)
+BTF_ID(func, bpf_kfunc_call_test_kptr_get)
BTF_SET_END(test_sk_acquire_kfunc_ids)
BTF_SET_START(test_sk_release_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
+BTF_ID(func, bpf_kfunc_call_memb1_release)
BTF_SET_END(test_sk_release_kfunc_ids)
BTF_SET_START(test_sk_ret_null_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
+BTF_ID(func, bpf_kfunc_call_memb_acquire)
+BTF_ID(func, bpf_kfunc_call_test_kptr_get)
BTF_SET_END(test_sk_ret_null_kfunc_ids)
+BTF_SET_START(test_sk_kptr_acquire_kfunc_ids)
+BTF_ID(func, bpf_kfunc_call_test_kptr_get)
+BTF_SET_END(test_sk_kptr_acquire_kfunc_ids)
+
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
u32 size, u32 headroom, u32 tailroom)
{
@@ -1580,14 +1613,36 @@ out:
static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
.owner = THIS_MODULE,
- .check_set = &test_sk_check_kfunc_ids,
- .acquire_set = &test_sk_acquire_kfunc_ids,
- .release_set = &test_sk_release_kfunc_ids,
- .ret_null_set = &test_sk_ret_null_kfunc_ids,
+ .check_set = &test_sk_check_kfunc_ids,
+ .acquire_set = &test_sk_acquire_kfunc_ids,
+ .release_set = &test_sk_release_kfunc_ids,
+ .ret_null_set = &test_sk_ret_null_kfunc_ids,
+ .kptr_acquire_set = &test_sk_kptr_acquire_kfunc_ids
};
+BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
+BTF_ID(struct, prog_test_ref_kfunc)
+BTF_ID(func, bpf_kfunc_call_test_release)
+BTF_ID(struct, prog_test_member)
+BTF_ID(func, bpf_kfunc_call_memb_release)
+
static int __init bpf_prog_test_run_init(void)
{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
+ const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
+ {
+ .btf_id = bpf_prog_test_dtor_kfunc_ids[0],
+ .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
+ },
+ {
+ .btf_id = bpf_prog_test_dtor_kfunc_ids[2],
+ .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
+ },
+ };
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
+ return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
+ ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
+ THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 8d6bab244c4a..58a4f70e01e3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -465,6 +465,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fix_features = br_fix_features,
.ndo_fdb_add = br_fdb_add,
.ndo_fdb_del = br_fdb_delete,
+ .ndo_fdb_del_bulk = br_fdb_delete_bulk,
.ndo_fdb_dump = br_fdb_dump,
.ndo_fdb_get = br_fdb_get,
.ndo_bridge_getlink = br_getlink,
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 6ccda68bd473..1a3d583fbc8e 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -558,18 +558,161 @@ void br_fdb_cleanup(struct work_struct *work)
mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
-/* Completely flush all dynamic entries in forwarding database.*/
-void br_fdb_flush(struct net_bridge *br)
+static bool __fdb_flush_matches(const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *f,
+ const struct net_bridge_fdb_flush_desc *desc)
+{
+ const struct net_bridge_port *dst = READ_ONCE(f->dst);
+ int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
+
+ if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
+ return false;
+ if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
+ return false;
+ if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
+ return false;
+
+ return true;
+}
+
+/* Flush forwarding database entries matching the description */
+void br_fdb_flush(struct net_bridge *br,
+ const struct net_bridge_fdb_flush_desc *desc)
{
struct net_bridge_fdb_entry *f;
- struct hlist_node *tmp;
- spin_lock_bh(&br->hash_lock);
- hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
- if (!test_bit(BR_FDB_STATIC, &f->flags))
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ if (!__fdb_flush_matches(br, f, desc))
+ continue;
+
+ spin_lock_bh(&br->hash_lock);
+ if (!hlist_unhashed(&f->fdb_node))
fdb_delete(br, f, true);
+ spin_unlock_bh(&br->hash_lock);
}
- spin_unlock_bh(&br->hash_lock);
+ rcu_read_unlock();
+}
+
+static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
+{
+ unsigned long flags = 0;
+
+ if (ndm_state & NUD_PERMANENT)
+ __set_bit(BR_FDB_LOCAL, &flags);
+ if (ndm_state & NUD_NOARP)
+ __set_bit(BR_FDB_STATIC, &flags);
+
+ return flags;
+}
+
+static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
+{
+ unsigned long flags = 0;
+
+ if (ndm_flags & NTF_USE)
+ __set_bit(BR_FDB_ADDED_BY_USER, &flags);
+ if (ndm_flags & NTF_EXT_LEARNED)
+ __set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
+ if (ndm_flags & NTF_OFFLOADED)
+ __set_bit(BR_FDB_OFFLOADED, &flags);
+ if (ndm_flags & NTF_STICKY)
+ __set_bit(BR_FDB_STICKY, &flags);
+
+ return flags;
+}
+
+static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
+ int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ const struct net_device *dev;
+
+ dev = __dev_get_by_index(dev_net(br->dev), ifindex);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
+ return -ENODEV;
+ }
+ if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
+ return -EINVAL;
+ }
+ if (netif_is_bridge_master(dev) && dev != br->dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flush bridge device does not match target bridge device");
+ return -EINVAL;
+ }
+ if (netif_is_bridge_port(dev)) {
+ struct net_bridge_port *p = br_port_get_rtnl(dev);
+
+ if (p->br != br) {
+ NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ u8 ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;
+ struct net_bridge_fdb_flush_desc desc = { .vlan_id = vid };
+ struct net_bridge_port *p = NULL;
+ struct net_bridge *br;
+
+ if (netif_is_bridge_master(dev)) {
+ br = netdev_priv(dev);
+ } else {
+ p = br_port_get_rtnl(dev);
+ if (!p) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
+ return -EINVAL;
+ }
+ br = p->br;
+ }
+
+ if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
+ NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
+ return -EINVAL;
+ }
+ if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
+ NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
+ return -EINVAL;
+ }
+
+ desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
+ desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
+ if (tb[NDA_NDM_STATE_MASK]) {
+ u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);
+
+ desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
+ }
+ if (tb[NDA_NDM_FLAGS_MASK]) {
+ u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);
+
+ desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
+ }
+ if (tb[NDA_IFINDEX]) {
+ int err, ifidx = nla_get_s32(tb[NDA_IFINDEX]);
+
+ err = __fdb_flush_validate_ifindex(br, ifidx, extack);
+ if (err)
+ return err;
+ desc.port_ifindex = ifidx;
+ } else if (p) {
+ /* flush was invoked with port device and NTF_MASTER */
+ desc.port_ifindex = p->dev->ifindex;
+ }
+
+ br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
+ desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);
+
+ br_fdb_flush(br, &desc);
+
+ return 0;
}
/* Flush all entries referring to a specific port.
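The br_fdb_delete_bulk() handler above is reached from an RTM_DELNEIGH request carrying the NLM_F_BULK flag; a rough userspace sketch using libmnl follows. The NLM_F_BULK value and the NDA_NDM_STATE_MASK attribute come from the uapi changes that accompany this series and need updated headers; "br0" and "swp1" are hypothetical device names.

/* Hedged sketch, not part of this patch: flush dynamic entries learned
 * behind one bridge port via the bulk delete path above.
 */
#include <net/if.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <libmnl/libmnl.h>

#ifndef NLM_F_BULK
#define NLM_F_BULK 0x200	/* assumed value; delete multiple objects */
#endif

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	unsigned int seq = 1;
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	int ret;

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_DELNEIGH;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_BULK;
	nlh->nlmsg_seq = seq;

	ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_ifindex = if_nametoindex("br0");

	/* Restrict the flush to one port and to dynamic entries: a state mask
	 * of NUD_PERMANENT | NUD_NOARP with ndm_state left at 0 means
	 * "neither local nor static", mirroring __ndm_state_to_fdb_flags().
	 */
	mnl_attr_put_u32(nlh, NDA_IFINDEX, if_nametoindex("swp1"));
	mnl_attr_put_u16(nlh, NDA_NDM_STATE_MASK, NUD_PERMANENT | NUD_NOARP);

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		return 1;
	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	if (ret > 0)
		ret = mnl_cb_run(buf, ret, seq, mnl_socket_get_portid(nl),
				 NULL, NULL);
	mnl_socket_close(nl);
	return ret < 0;
}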
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 4556d913955b..fdcc641fc89a 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -251,14 +251,16 @@ static int __mdb_fill_info(struct sk_buff *skb,
__mdb_entry_fill_flags(&e, flags);
e.ifindex = ifindex;
e.vid = mp->addr.vid;
- if (mp->addr.proto == htons(ETH_P_IP))
+ if (mp->addr.proto == htons(ETH_P_IP)) {
e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- else if (mp->addr.proto == htons(ETH_P_IPV6))
+ } else if (mp->addr.proto == htons(ETH_P_IPV6)) {
e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
- else
+ } else {
ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
+ e.state = MDB_PG_FLAGS_PERMANENT;
+ }
e.addr.proto = mp->addr.proto;
nest_ent = nla_nest_start_noflag(skb,
MDBA_MDB_ENTRY_INFO);
@@ -873,8 +875,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
return -EINVAL;
/* host join errors which can happen before creating the group */
- if (!port) {
- /* don't allow any flags for host-joined groups */
+ if (!port && !br_group_is_l2(&group)) {
+ /* don't allow any flags for host-joined IP groups */
if (entry->state) {
NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
return -EINVAL;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 200ad05b296f..bb01776d2d88 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1326,8 +1326,13 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
br_recalculate_fwd_mask(br);
}
- if (data[IFLA_BR_FDB_FLUSH])
- br_fdb_flush(br);
+ if (data[IFLA_BR_FDB_FLUSH]) {
+ struct net_bridge_fdb_flush_desc desc = {
+ .flags_mask = BR_FDB_STATIC
+ };
+
+ br_fdb_flush(br, &desc);
+ }
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (data[IFLA_BR_MCAST_ROUTER]) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 18ccc3d5d296..6ae882cfae1c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -274,6 +274,13 @@ struct net_bridge_fdb_entry {
struct rcu_head rcu;
};
+struct net_bridge_fdb_flush_desc {
+ unsigned long flags;
+ unsigned long flags_mask;
+ int port_ifindex;
+ u16 vlan_id;
+};
+
#define MDB_PG_FLAGS_PERMANENT BIT(0)
#define MDB_PG_FLAGS_OFFLOAD BIT(1)
#define MDB_PG_FLAGS_FAST_LEAVE BIT(2)
@@ -755,11 +762,17 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
#endif
/* br_fdb.c */
+#define FDB_FLUSH_IGNORED_NDM_FLAGS (NTF_MASTER | NTF_SELF)
+#define FDB_FLUSH_ALLOWED_NDM_STATES (NUD_PERMANENT | NUD_NOARP)
+#define FDB_FLUSH_ALLOWED_NDM_FLAGS (NTF_USE | NTF_EXT_LEARNED | \
+ NTF_STICKY | NTF_OFFLOADED)
+
int br_fdb_init(void);
void br_fdb_fini(void);
int br_fdb_hash_init(struct net_bridge *br);
void br_fdb_hash_fini(struct net_bridge *br);
-void br_fdb_flush(struct net_bridge *br);
+void br_fdb_flush(struct net_bridge *br,
+ const struct net_bridge_fdb_flush_desc *desc);
void br_fdb_find_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
const unsigned char *addr, u16 vid);
@@ -781,6 +794,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid);
+int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, u16 vid,
+ struct netlink_ext_ack *extack);
int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
const unsigned char *addr, u16 vid, u16 nlh_flags,
struct netlink_ext_ack *extack);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 18affda2b522..8f3d76c751dd 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -72,7 +72,8 @@ bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
- BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED)
+ BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
+ BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)
int br_switchdev_set_port_flag(struct net_bridge_port *p,
unsigned long flags,
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 3f7ca88c2aa3..612e367fff20 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -344,7 +344,11 @@ static DEVICE_ATTR_RW(group_addr);
static int set_flush(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
- br_fdb_flush(br);
+ struct net_bridge_fdb_flush_desc desc = {
+ .flags_mask = BR_FDB_STATIC
+ };
+
+ br_fdb_flush(br, &desc);
return 0;
}
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2b8892d502f7..251e666ba9a2 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -282,7 +282,7 @@ static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m,
if (flags & MSG_OOB)
goto read_error;
- skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ skb = skb_recv_datagram(sk, flags, &ret);
if (!skb)
goto read_error;
copylen = skb->len;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 95d209b52e6a..65ee1b784a30 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1632,12 +1632,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
- int noblock;
int err;
- noblock = flags & MSG_DONTWAIT;
- flags &= ~MSG_DONTWAIT;
- skb = skb_recv_datagram(sk, flags, noblock, &error);
+ skb = skb_recv_datagram(sk, flags, &error);
if (!skb)
return error;
@@ -1650,7 +1647,7 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
return err;
}
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
__sockaddr_check_size(BCM_MIN_NAMELEN);
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 1e7c6a460ef9..35a1ae61744c 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -1055,7 +1055,6 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct isotp_sock *so = isotp_sk(sk);
- int noblock = flags & MSG_DONTWAIT;
int ret = 0;
if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
@@ -1064,8 +1063,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (!so->bound)
return -EADDRNOTAVAIL;
- flags &= ~MSG_DONTWAIT;
- skb = skb_recv_datagram(sk, flags, noblock, &ret);
+ skb = skb_recv_datagram(sk, flags, &ret);
if (!skb)
return ret;
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 6dff4510687a..f5ecfdcf57b2 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -802,7 +802,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
SCM_J1939_ERRQUEUE);
- skb = skb_recv_datagram(sk, flags, 0, &ret);
+ skb = skb_recv_datagram(sk, flags, &ret);
if (!skb)
return ret;
@@ -841,7 +841,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
paddr->can_addr.j1939.pgn = skcb->addr.pgn;
}
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
msg->msg_flags |= skcb->msg_flags;
skb_free_datagram(sk, skb);
diff --git a/net/can/raw.c b/net/can/raw.c
index 7105fa4824e4..b7dbb57557f3 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -846,16 +846,12 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
struct sock *sk = sock->sk;
struct sk_buff *skb;
int err = 0;
- int noblock;
-
- noblock = flags & MSG_DONTWAIT;
- flags &= ~MSG_DONTWAIT;
if (flags & MSG_ERRQUEUE)
return sock_recv_errqueue(sk, msg, size,
SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
return err;
@@ -870,7 +866,7 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
return err;
}
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
__sockaddr_check_size(RAW_MIN_NAMELEN);
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index e3ac36380520..a25ec93729b9 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -40,7 +40,7 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
if (!sdata)
return -ENOENT;
- bpf_selem_unlink(SELEM(sdata));
+ bpf_selem_unlink(SELEM(sdata), true);
return 0;
}
@@ -75,8 +75,8 @@ void bpf_sk_storage_free(struct sock *sk)
* sk_storage.
*/
bpf_selem_unlink_map(selem);
- free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
- selem, true);
+ free_sk_storage = bpf_selem_unlink_storage_nolock(
+ sk_storage, selem, true, false);
}
raw_spin_unlock_bh(&sk_storage->lock);
rcu_read_unlock();
@@ -338,7 +338,7 @@ bpf_sk_storage_ptr(void *owner)
return &sk->sk_bpf_storage;
}
-static int sk_storage_map_btf_id;
+BTF_ID_LIST_SINGLE(sk_storage_map_btf_ids, struct, bpf_local_storage_map)
const struct bpf_map_ops sk_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check,
@@ -349,8 +349,7 @@ const struct bpf_map_ops sk_storage_map_ops = {
.map_update_elem = bpf_fd_sk_storage_update_elem,
.map_delete_elem = bpf_fd_sk_storage_delete_elem,
.map_check_btf = bpf_local_storage_map_check_btf,
- .map_btf_name = "bpf_local_storage_map",
- .map_btf_id = &sk_storage_map_btf_id,
+ .map_btf_id = &sk_storage_map_btf_ids[0],
.map_local_storage_charge = bpf_sk_storage_charge,
.map_local_storage_uncharge = bpf_sk_storage_uncharge,
.map_owner_storage_ptr = bpf_sk_storage_ptr,
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ee290776c661..50f4faeea76c 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -62,8 +62,6 @@
#include <trace/events/skb.h>
#include <net/busy_poll.h>
-#include "datagram.h"
-
/*
* Is a socket 'connection oriented' ?
*/
@@ -310,12 +308,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
EXPORT_SYMBOL(__skb_recv_datagram);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
- int noblock, int *err)
+ int *err)
{
int off = 0;
- return __skb_recv_datagram(sk, &sk->sk_receive_queue,
- flags | (noblock ? MSG_DONTWAIT : 0),
+ return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
&off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
diff --git a/net/core/datagram.h b/net/core/datagram.h
deleted file mode 100644
index bcfb75bfa3b2..000000000000
--- a/net/core/datagram.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _NET_CORE_DATAGRAM_H_
-#define _NET_CORE_DATAGRAM_H_
-
-#include <linux/types.h>
-
-struct sock;
-struct sk_buff;
-struct iov_iter;
-
-int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *from, size_t length);
-
-#endif /* _NET_CORE_DATAGRAM_H_ */
diff --git a/net/core/dev.c b/net/core/dev.c
index 1461c2d9dec8..c2d73595a7c3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -151,6 +151,7 @@
#include <linux/prandom.h>
#include <linux/once_lite.h>
+#include "dev.h"
#include "net-sysfs.h"
@@ -701,6 +702,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
if (WARN_ON_ONCE(last_dev == ctx.dev))
return -1;
}
+
+ if (!ctx.dev)
+ return ret;
+
path = dev_fwd_path(stack);
if (!path)
return -1;
@@ -3920,6 +3925,25 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
return skb;
}
+
+static struct netdev_queue *
+netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
+{
+ int qm = skb_get_queue_mapping(skb);
+
+ return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
+}
+
+static bool netdev_xmit_txqueue_skipped(void)
+{
+ return __this_cpu_read(softnet_data.xmit.skip_txqueue);
+}
+
+void netdev_xmit_skip_txqueue(bool skip)
+{
+ __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
+}
+EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_XPS
@@ -4087,10 +4111,10 @@ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
struct net_device *dev = skb->dev;
- struct netdev_queue *txq;
+ struct netdev_queue *txq = NULL;
struct Qdisc *q;
int rc = -ENOMEM;
bool again = false;
@@ -4118,11 +4142,17 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
if (!skb)
goto out;
}
+
+ netdev_xmit_skip_txqueue(false);
+
nf_skip_egress(skb, true);
skb = sch_handle_egress(skb, &rc, dev);
if (!skb)
goto out;
nf_skip_egress(skb, false);
+
+ if (netdev_xmit_txqueue_skipped())
+ txq = netdev_tx_queue_mapping(dev, skb);
}
#endif
/* If device/qdisc don't need skb->dst, release it right now while
@@ -4133,7 +4163,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
else
skb_dst_force(skb);
- txq = netdev_core_pick_tx(dev, skb, sb_dev);
+ if (!txq)
+ txq = netdev_core_pick_tx(dev, skb, sb_dev);
+
q = rcu_dereference_bh(txq->qdisc);
trace_net_dev_queue(skb);
@@ -4203,18 +4235,7 @@ out:
rcu_read_unlock_bh();
return rc;
}
-
-int dev_queue_xmit(struct sk_buff *skb)
-{
- return __dev_queue_xmit(skb, NULL);
-}
-EXPORT_SYMBOL(dev_queue_xmit);
-
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
-{
- return __dev_queue_xmit(skb, sb_dev);
-}
-EXPORT_SYMBOL(dev_queue_xmit_accel);
+EXPORT_SYMBOL(__dev_queue_xmit);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
@@ -4513,6 +4534,12 @@ static void rps_trigger_softirq(void *data)
#endif /* CONFIG_RPS */
+/* Called from hardirq (IPI) context */
+static void trigger_rx_softirq(void *data __always_unused)
+{
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+}
+
/*
* Check if this softnet_data structure is another cpu one
* If yes, queue it to our IPI list and return 1
@@ -5370,13 +5397,11 @@ check_vlan_id:
*ppt_prev = pt_prev;
} else {
drop:
- if (!deliver_exact) {
+ if (!deliver_exact)
dev_core_stats_rx_dropped_inc(skb->dev);
- kfree_skb_reason(skb, SKB_DROP_REASON_PTYPE_ABSENT);
- } else {
+ else
dev_core_stats_rx_nohandler_inc(skb->dev);
- kfree_skb(skb);
- }
+ kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
*/
@@ -6278,8 +6303,8 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
}
EXPORT_SYMBOL(dev_set_threaded);
-void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight)
+void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
{
if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
return;
@@ -6312,7 +6337,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
if (dev->threaded && napi_kthread_create(napi))
dev->threaded = 0;
}
-EXPORT_SYMBOL(netif_napi_add);
+EXPORT_SYMBOL(netif_napi_add_weight);
void napi_disable(struct napi_struct *n)
{
@@ -6541,6 +6566,28 @@ static int napi_threaded_poll(void *data)
return 0;
}
+static void skb_defer_free_flush(struct softnet_data *sd)
+{
+ struct sk_buff *skb, *next;
+ unsigned long flags;
+
+ /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+ if (!READ_ONCE(sd->defer_list))
+ return;
+
+ spin_lock_irqsave(&sd->defer_lock, flags);
+ skb = sd->defer_list;
+ sd->defer_list = NULL;
+ sd->defer_count = 0;
+ spin_unlock_irqrestore(&sd->defer_lock, flags);
+
+ while (skb != NULL) {
+ next = skb->next;
+ __kfree_skb(skb);
+ skb = next;
+ }
+}
+
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -6559,7 +6606,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
if (list_empty(&list)) {
if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
- return;
+ goto end;
break;
}
@@ -6586,6 +6633,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
net_rps_action_and_irq_enable(sd);
+end:
+ skb_defer_free_flush(sd);
}
struct netdev_adjacent {
@@ -8641,7 +8690,6 @@ void dev_set_group(struct net_device *dev, int new_group)
{
dev->group = new_group;
}
-EXPORT_SYMBOL(dev_set_group);
/**
* dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
@@ -8756,7 +8804,6 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
return -ENODEV;
return ops->ndo_change_carrier(dev, new_carrier);
}
-EXPORT_SYMBOL(dev_change_carrier);
/**
* dev_get_phys_port_id - Get device physical port ID
@@ -8774,7 +8821,6 @@ int dev_get_phys_port_id(struct net_device *dev,
return -EOPNOTSUPP;
return ops->ndo_get_phys_port_id(dev, ppid);
}
-EXPORT_SYMBOL(dev_get_phys_port_id);
/**
* dev_get_phys_port_name - Get device physical port name
@@ -8797,7 +8843,6 @@ int dev_get_phys_port_name(struct net_device *dev,
}
return devlink_compat_phys_port_name_get(dev, name, len);
}
-EXPORT_SYMBOL(dev_get_phys_port_name);
/**
* dev_get_port_parent_id - Get the device's port parent identifier
@@ -8879,7 +8924,6 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
dev->proto_down = proto_down;
return 0;
}
-EXPORT_SYMBOL(dev_change_proto_down);
/**
* dev_change_proto_down_reason - proto down reason
@@ -8904,7 +8948,6 @@ void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
}
}
}
-EXPORT_SYMBOL(dev_change_proto_down_reason);
struct bpf_xdp_link {
struct bpf_link link;
@@ -9431,7 +9474,7 @@ static int dev_new_index(struct net *net)
}
/* Delayed registration/unregisteration */
-static LIST_HEAD(net_todo_list);
+LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
static void net_set_todo(struct net_device *dev)
@@ -10355,6 +10398,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
+ storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
}
}
return storage;
@@ -11297,6 +11341,8 @@ static int __init net_dev_init(void)
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
sd->cpu = i;
#endif
+ INIT_CSD(&sd->defer_csd, trigger_rx_softirq, NULL);
+ spin_lock_init(&sd->defer_lock);
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
diff --git a/net/core/dev.h b/net/core/dev.h
new file mode 100644
index 000000000000..27923df00637
--- /dev/null
+++ b/net/core/dev.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_CORE_DEV_H
+#define _NET_CORE_DEV_H
+
+#include <linux/types.h>
+
+struct net;
+struct net_device;
+struct netdev_bpf;
+struct netdev_phys_item_id;
+struct netlink_ext_ack;
+
+/* Random bits of netdevice that don't need to be exposed */
+#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
+struct sd_flow_limit {
+ u64 count;
+ unsigned int num_buckets;
+ unsigned int history_head;
+ u16 history[FLOW_LIMIT_HISTORY];
+ u8 buckets[];
+};
+
+extern int netdev_flow_limit_table_len;
+
+#ifdef CONFIG_PROC_FS
+int __init dev_proc_init(void);
+#else
+#define dev_proc_init() 0
+#endif
+
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_run_queue(void);
+
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
+void dev_addr_check(struct net_device *dev);
+
+/* sysctls not referred to from outside net/core/ */
+extern int netdev_budget;
+extern unsigned int netdev_budget_usecs;
+
+extern int netdev_tstamp_prequeue;
+extern int netdev_unregister_timeout_secs;
+extern int weight_p;
+extern int dev_weight_rx_bias;
+extern int dev_weight_tx_bias;
+
+/* rtnl helpers */
+extern struct list_head net_todo_list;
+void netdev_run_todo(void);
+
+/* netdev management, shared between various uAPI entry points */
+struct netdev_name_node {
+ struct hlist_node hlist;
+ struct list_head list;
+ struct net_device *dev;
+ const char *name;
+};
+
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_change_name(struct net_device *dev, const char *newname);
+
+int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+
+int dev_validate_mtu(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
+
+int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
+int dev_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len);
+
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
+void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
+ u32 value);
+
+typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
+int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
+ int fd, int expected_fd, u32 flags);
+
+int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
+void dev_set_group(struct net_device *dev, int new_group);
+int dev_change_carrier(struct net_device *dev, bool new_carrier);
+
+void __dev_set_rx_mode(struct net_device *dev);
+
+#endif
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index bead38ca50bd..baa63dee2829 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -12,6 +12,8 @@
#include <linux/export.h>
#include <linux/list.h>
+#include "dev.h"
+
/*
* General list handling functions
*/
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 1b807d119da5..4f6be442ae7e 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -10,6 +10,8 @@
#include <net/dsa.h>
#include <net/wext.h>
+#include "dev.h"
+
/*
* Map an interface index to its name (SIOCGIFNAME)
*/
diff --git a/net/core/devlink.c b/net/core/devlink.c
index aeca13b6e57b..5f441a0e34f4 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -54,6 +54,8 @@ struct devlink {
struct list_head trap_list;
struct list_head trap_group_list;
struct list_head trap_policer_list;
+ struct list_head linecard_list;
+ struct mutex linecards_lock; /* protects linecard_list */
const struct devlink_ops *ops;
u64 features;
struct xarray snapshot_ids;
@@ -70,6 +72,24 @@ struct devlink {
char priv[] __aligned(NETDEV_ALIGN);
};
+struct devlink_linecard_ops;
+struct devlink_linecard_type;
+
+struct devlink_linecard {
+ struct list_head list;
+ struct devlink *devlink;
+ unsigned int index;
+ refcount_t refcount;
+ const struct devlink_linecard_ops *ops;
+ void *priv;
+ enum devlink_linecard_state state;
+ struct mutex state_lock; /* Protects state and device_list */
+ const char *type;
+ struct devlink_linecard_type *types;
+ unsigned int types_count;
+ struct list_head device_list;
+};
+
/**
* struct devlink_resource - devlink resource
* @name: name of the resource
@@ -397,6 +417,58 @@ devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
return ERR_PTR(-EINVAL);
}
+static struct devlink_linecard *
+devlink_linecard_get_by_index(struct devlink *devlink,
+ unsigned int linecard_index)
+{
+ struct devlink_linecard *devlink_linecard;
+
+ list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) {
+ if (devlink_linecard->index == linecard_index)
+ return devlink_linecard;
+ }
+ return NULL;
+}
+
+static bool devlink_linecard_index_exists(struct devlink *devlink,
+ unsigned int linecard_index)
+{
+ return devlink_linecard_get_by_index(devlink, linecard_index);
+}
+
+static struct devlink_linecard *
+devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
+{
+ if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) {
+ u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]);
+ struct devlink_linecard *linecard;
+
+ mutex_lock(&devlink->linecards_lock);
+ linecard = devlink_linecard_get_by_index(devlink, linecard_index);
+ if (linecard)
+ refcount_inc(&linecard->refcount);
+ mutex_unlock(&devlink->linecards_lock);
+ if (!linecard)
+ return ERR_PTR(-ENODEV);
+ return linecard;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct devlink_linecard *
+devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ return devlink_linecard_get_from_attrs(devlink, info->attrs);
+}
+
+static void devlink_linecard_put(struct devlink_linecard *linecard)
+{
+ if (refcount_dec_and_test(&linecard->refcount)) {
+ mutex_destroy(&linecard->state_lock);
+ kfree(linecard);
+ }
+}
+
struct devlink_sb {
struct list_head list;
unsigned int index;
@@ -617,16 +689,18 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
#define DEVLINK_NL_FLAG_NEED_RATE BIT(2)
#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
+#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
/* The per devlink instance lock is taken by default in the pre-doit
* operation, yet several commands do not require this. The global
* devlink lock is taken and protects from disruption by user-calls.
*/
-#define DEVLINK_NL_FLAG_NO_LOCK BIT(4)
+#define DEVLINK_NL_FLAG_NO_LOCK BIT(5)
static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
+ struct devlink_linecard *linecard;
struct devlink_port *devlink_port;
struct devlink *devlink;
int err;
@@ -669,6 +743,13 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
goto unlock;
}
info->user_ptr[1] = rate_node;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
+ linecard = devlink_linecard_get_from_info(devlink, info);
+ if (IS_ERR(linecard)) {
+ err = PTR_ERR(linecard);
+ goto unlock;
+ }
+ info->user_ptr[1] = linecard;
}
return 0;
@@ -683,9 +764,14 @@ unlock:
static void devlink_nl_post_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
+ struct devlink_linecard *linecard;
struct devlink *devlink;
devlink = info->user_ptr[0];
+ if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
+ linecard = info->user_ptr[1];
+ devlink_linecard_put(linecard);
+ }
if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
mutex_unlock(&devlink->lock);
devlink_put(devlink);
@@ -1158,6 +1244,10 @@ static int devlink_nl_port_fill(struct sk_buff *msg,
goto nla_put_failure;
if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
goto nla_put_failure;
+ if (devlink_port->linecard &&
+ nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX,
+ devlink_port->linecard->index))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
@@ -1964,6 +2054,562 @@ static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
return err;
}
+struct devlink_linecard_type {
+ const char *type;
+ const void *priv;
+};
+
+struct devlink_linecard_device {
+ struct list_head list;
+ unsigned int index;
+ void *priv;
+};
+
+static int
+devlink_nl_linecard_device_fill(struct sk_buff *msg,
+ struct devlink_linecard_device *linecard_device)
+{
+ struct nlattr *attr;
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE);
+ if (!attr)
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_DEVICE_INDEX,
+ linecard_device->index)) {
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+ }
+ nla_nest_end(msg, attr);
+
+ return 0;
+}
+
+static int devlink_nl_linecard_devices_fill(struct sk_buff *msg,
+ struct devlink_linecard *linecard)
+{
+ struct devlink_linecard_device *linecard_device;
+ struct nlattr *attr;
+ int err;
+
+ if (list_empty(&linecard->device_list))
+ return 0;
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE_LIST);
+ if (!attr)
+ return -EMSGSIZE;
+ list_for_each_entry(linecard_device, &linecard->device_list, list) {
+ err = devlink_nl_linecard_device_fill(msg, linecard_device);
+ if (err) {
+ nla_nest_cancel(msg, attr);
+ return err;
+ }
+ }
+ nla_nest_end(msg, attr);
+
+ return 0;
+}
+
+static int devlink_nl_linecard_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_linecard *linecard,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_linecard_type *linecard_type;
+ struct nlattr *attr;
+ void *hdr;
+ int err;
+ int i;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index))
+ goto nla_put_failure;
+ if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state))
+ goto nla_put_failure;
+ if (linecard->type &&
+ nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type))
+ goto nla_put_failure;
+
+ if (linecard->types_count) {
+ attr = nla_nest_start(msg,
+ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES);
+ if (!attr)
+ goto nla_put_failure;
+ for (i = 0; i < linecard->types_count; i++) {
+ linecard_type = &linecard->types[i];
+ if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE,
+ linecard_type->type)) {
+ nla_nest_cancel(msg, attr);
+ goto nla_put_failure;
+ }
+ }
+ nla_nest_end(msg, attr);
+ }
+
+ err = devlink_nl_linecard_devices_fill(msg, linecard);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_linecard_notify(struct devlink_linecard *linecard,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = linecard->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW &&
+ cmd != DEVLINK_CMD_LINECARD_DEL);
+
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_linecard_fill(msg, devlink, linecard, cmd, 0, 0, 0,
+ NULL);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_linecard *linecard = info->user_ptr[1];
+ struct devlink *devlink = linecard->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&linecard->state_lock);
+ err = devlink_nl_linecard_fill(msg, devlink, linecard,
+ DEVLINK_CMD_LINECARD_NEW,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ mutex_unlock(&linecard->state_lock);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_linecard *linecard;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ unsigned long index;
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ xa_for_each_marked(&devlinks, index, devlink, DEVLINK_REGISTERED) {
+ if (!devlink_try_get(devlink))
+ continue;
+
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ goto retry;
+
+ mutex_lock(&devlink->linecards_lock);
+ list_for_each_entry(linecard, &devlink->linecard_list, list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ mutex_lock(&linecard->state_lock);
+ err = devlink_nl_linecard_fill(msg, devlink, linecard,
+ DEVLINK_CMD_LINECARD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+ cb->extack);
+ mutex_unlock(&linecard->state_lock);
+ if (err) {
+ mutex_unlock(&devlink->linecards_lock);
+ devlink_put(devlink);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->linecards_lock);
+retry:
+ devlink_put(devlink);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static struct devlink_linecard_type *
+devlink_linecard_type_lookup(struct devlink_linecard *linecard,
+ const char *type)
+{
+ struct devlink_linecard_type *linecard_type;
+ int i;
+
+ for (i = 0; i < linecard->types_count; i++) {
+ linecard_type = &linecard->types[i];
+ if (!strcmp(type, linecard_type->type))
+ return linecard_type;
+ }
+ return NULL;
+}
+
+static int devlink_linecard_type_set(struct devlink_linecard *linecard,
+ const char *type,
+ struct netlink_ext_ack *extack)
+{
+ const struct devlink_linecard_ops *ops = linecard->ops;
+ struct devlink_linecard_type *linecard_type;
+ int err;
+
+ mutex_lock(&linecard->state_lock);
+ if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
+ NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
+ err = -EBUSY;
+ goto out;
+ }
+ if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
+ NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
+ err = -EBUSY;
+ goto out;
+ }
+
+ linecard_type = devlink_linecard_type_lookup(linecard, type);
+ if (!linecard_type) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported line card type provided");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED &&
+ linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
+ NL_SET_ERR_MSG_MOD(extack, "Line card already provisioned");
+ err = -EBUSY;
+ /* Check if the line card is provisioned in the same
+ * way the user asks. If it is, make the operation
+ * return success.
+ */
+ if (ops->same_provision &&
+ ops->same_provision(linecard, linecard->priv,
+ linecard_type->type,
+ linecard_type->priv))
+ err = 0;
+ goto out;
+ }
+
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING;
+ linecard->type = linecard_type->type;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ err = ops->provision(linecard, linecard->priv, linecard_type->type,
+ linecard_type->priv, extack);
+ if (err) {
+ /* Provisioning failed. Assume the linecard is unprovisioned
+ * for future operations.
+ */
+ mutex_lock(&linecard->state_lock);
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ }
+ return err;
+
+out:
+ mutex_unlock(&linecard->state_lock);
+ return err;
+}
+
+static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mutex_lock(&linecard->state_lock);
+ if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
+ NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
+ err = -EBUSY;
+ goto out;
+ }
+ if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
+ NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
+ err = -EBUSY;
+ goto out;
+ }
+ if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ err = 0;
+ goto out;
+ }
+
+ if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) {
+ NL_SET_ERR_MSG_MOD(extack, "Line card is not provisioned");
+ err = 0;
+ goto out;
+ }
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ err = linecard->ops->unprovision(linecard, linecard->priv,
+ extack);
+ if (err) {
+ /* Unprovisioning failed. Assume the linecard is unprovisioned
+ * for future operations.
+ */
+ mutex_lock(&linecard->state_lock);
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ }
+ return err;
+
+out:
+ mutex_unlock(&linecard->state_lock);
+ return err;
+}
+
+static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_linecard *linecard = info->user_ptr[1];
+ struct netlink_ext_ack *extack = info->extack;
+ int err;
+
+ if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) {
+ const char *type;
+
+ type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]);
+ if (strcmp(type, "")) {
+ err = devlink_linecard_type_set(linecard, type, extack);
+ if (err)
+ return err;
+ } else {
+ err = devlink_linecard_type_unset(linecard, extack);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
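/* Illustrative userspace view of the two commands above (not part of this
 * patch; the handle, line card index and type string are hypothetical). The
 * DEVLINK_CMD_LINECARD_GET/SET paths are expected to be driven from the
 * devlink CLI roughly like this:
 *
 *   $ devlink lc show pci/0000:01:00.0 lc 8
 *   $ devlink lc set pci/0000:01:00.0 lc 8 type 16x100G    # provision
 *   $ devlink lc set pci/0000:01:00.0 lc 8 notype          # unprovision
 *
 * An empty DEVLINK_ATTR_LINECARD_TYPE string is what selects the
 * devlink_linecard_type_unset() branch above.
 */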
+
+struct devlink_info_req {
+ struct sk_buff *msg;
+};
+
+static int
+devlink_nl_linecard_device_info_fill(struct sk_buff *msg,
+ struct devlink_linecard *linecard,
+ struct devlink_linecard_device *linecard_device,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *attr;
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE);
+ if (!attr)
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_DEVICE_INDEX,
+ linecard_device->index)) {
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+ }
+ if (linecard->ops->device_info_get) {
+ struct devlink_info_req req;
+ int err;
+
+ req.msg = msg;
+ err = linecard->ops->device_info_get(linecard_device,
+ linecard_device->priv,
+ &req, extack);
+ if (err) {
+ nla_nest_cancel(msg, attr);
+ return err;
+ }
+ }
+ nla_nest_end(msg, attr);
+
+ return 0;
+}
+
+static int devlink_nl_linecard_devices_info_fill(struct sk_buff *msg,
+ struct devlink_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_linecard_device *linecard_device;
+ struct nlattr *attr;
+ int err;
+
+ if (list_empty(&linecard->device_list))
+ return 0;
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE_LIST);
+ if (!attr)
+ return -EMSGSIZE;
+ list_for_each_entry(linecard_device, &linecard->device_list, list) {
+ err = devlink_nl_linecard_device_info_fill(msg, linecard,
+ linecard_device,
+ extack);
+ if (err) {
+ nla_nest_cancel(msg, attr);
+ return err;
+ }
+ }
+ nla_nest_end(msg, attr);
+
+ return 0;
+}
+
+static int
+devlink_nl_linecard_info_fill(struct sk_buff *msg, struct devlink *devlink,
+ struct devlink_linecard *linecard,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags, struct netlink_ext_ack *extack)
+{
+ struct devlink_info_req req;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index))
+ goto nla_put_failure;
+
+ req.msg = msg;
+ err = linecard->ops->info_get(linecard, linecard->priv, &req, extack);
+ if (err)
+ goto nla_put_failure;
+
+ err = devlink_nl_linecard_devices_info_fill(msg, linecard, extack);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static int devlink_nl_cmd_linecard_info_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_linecard *linecard = info->user_ptr[1];
+ struct devlink *devlink = linecard->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ if (!linecard->ops->info_get)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&linecard->state_lock);
+ err = devlink_nl_linecard_info_fill(msg, devlink, linecard,
+ DEVLINK_CMD_LINECARD_INFO_GET,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ mutex_unlock(&linecard->state_lock);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_linecard_info_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_linecard *linecard;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ unsigned long index;
+ int idx = 0;
+ int err = 0;
+
+ mutex_lock(&devlink_mutex);
+ xa_for_each_marked(&devlinks, index, devlink, DEVLINK_REGISTERED) {
+ if (!devlink_try_get(devlink))
+ continue;
+
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ goto retry;
+
+ mutex_lock(&devlink->linecards_lock);
+ list_for_each_entry(linecard, &devlink->linecard_list, list) {
+ if (idx < start || !linecard->ops->info_get) {
+ idx++;
+ continue;
+ }
+ mutex_lock(&linecard->state_lock);
+ err = devlink_nl_linecard_info_fill(msg, devlink, linecard,
+ DEVLINK_CMD_LINECARD_INFO_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+ cb->extack);
+ mutex_unlock(&linecard->state_lock);
+ if (err) {
+ mutex_unlock(&devlink->linecards_lock);
+ devlink_put(devlink);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->linecards_lock);
+retry:
+ devlink_put(devlink);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+
+ if (err != -EMSGSIZE)
+ return err;
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
struct devlink_sb *devlink_sb,
enum devlink_command cmd, u32 portid,
@@ -5956,10 +6602,6 @@ out_dev:
return err;
}
-struct devlink_info_req {
- struct sk_buff *msg;
-};
-
int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
{
return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name);
@@ -8589,6 +9231,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
[DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
};
static const struct genl_small_ops devlink_nl_ops[] = {
@@ -8665,6 +9309,26 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
},
{
+ .cmd = DEVLINK_CMD_LINECARD_GET,
+ .doit = devlink_nl_cmd_linecard_get_doit,
+ .dumpit = devlink_nl_cmd_linecard_get_dumpit,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_LINECARD_SET,
+ .doit = devlink_nl_cmd_linecard_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
+ },
+ {
+ .cmd = DEVLINK_CMD_LINECARD_INFO_GET,
+ .doit = devlink_nl_cmd_linecard_info_get_doit,
+ .dumpit = devlink_nl_cmd_linecard_info_get_dumpit,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
+ /* can be retrieved by unprivileged users */
+ },
+ {
.cmd = DEVLINK_CMD_SB_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_get_doit,
@@ -9043,6 +9707,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
write_pnet(&devlink->_net, net);
INIT_LIST_HEAD(&devlink->port_list);
INIT_LIST_HEAD(&devlink->rate_list);
+ INIT_LIST_HEAD(&devlink->linecard_list);
INIT_LIST_HEAD(&devlink->sb_list);
INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
INIT_LIST_HEAD(&devlink->resource_list);
@@ -9054,6 +9719,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
INIT_LIST_HEAD(&devlink->trap_policer_list);
mutex_init(&devlink->lock);
mutex_init(&devlink->reporters_lock);
+ mutex_init(&devlink->linecards_lock);
refcount_set(&devlink->refcount, 1);
init_completion(&devlink->comp);
@@ -9080,10 +9746,14 @@ static void devlink_notify_register(struct devlink *devlink)
struct devlink_param_item *param_item;
struct devlink_trap_item *trap_item;
struct devlink_port *devlink_port;
+ struct devlink_linecard *linecard;
struct devlink_rate *rate_node;
struct devlink_region *region;
devlink_notify(devlink, DEVLINK_CMD_NEW);
+ list_for_each_entry(linecard, &devlink->linecard_list, list)
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+
list_for_each_entry(devlink_port, &devlink->port_list, list)
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
@@ -9191,6 +9861,7 @@ void devlink_free(struct devlink *devlink)
{
ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+ mutex_destroy(&devlink->linecards_lock);
mutex_destroy(&devlink->reporters_lock);
mutex_destroy(&devlink->lock);
WARN_ON(!list_empty(&devlink->trap_policer_list));
@@ -9203,6 +9874,7 @@ void devlink_free(struct devlink *devlink)
WARN_ON(!list_empty(&devlink->dpipe_table_list));
WARN_ON(!list_empty(&devlink->sb_list));
WARN_ON(!list_empty(&devlink->rate_list));
+ WARN_ON(!list_empty(&devlink->linecard_list));
WARN_ON(!list_empty(&devlink->port_list));
xa_destroy(&devlink->snapshot_ids);
@@ -9681,6 +10353,21 @@ void devlink_rate_nodes_destroy(struct devlink *devlink)
}
EXPORT_SYMBOL_GPL(devlink_rate_nodes_destroy);
+/**
+ * devlink_port_linecard_set - Link port with a linecard
+ *
+ * @devlink_port: devlink port
+ * @linecard: devlink linecard
+ */
+void devlink_port_linecard_set(struct devlink_port *devlink_port,
+ struct devlink_linecard *linecard)
+{
+ if (WARN_ON(devlink_port->devlink))
+ return;
+ devlink_port->linecard = linecard;
+}
+EXPORT_SYMBOL_GPL(devlink_port_linecard_set);
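/* Minimal driver-side sketch (hypothetical driver code; not part of this
 * patch): a port is linked to an already created line card before the port
 * is registered, so devlink_nl_port_fill() can report
 * DEVLINK_ATTR_LINECARD_INDEX and phys_port_name gets the "l%u" prefix.
 */
static int example_port_register(struct devlink *devlink,
				 struct devlink_port *dl_port,
				 struct devlink_linecard *linecard,
				 unsigned int port_index, u32 port_number)
{
	struct devlink_port_attrs attrs = {
		.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
		.phys.port_number = port_number,
	};

	devlink_port_attrs_set(dl_port, &attrs);
	/* Must run before devlink_port_register(); the WARN_ON() above
	 * rejects a port whose devlink pointer is already set.
	 */
	devlink_port_linecard_set(dl_port, linecard);
	return devlink_port_register(devlink, dl_port, port_index);
}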
+
static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
char *name, size_t len)
{
@@ -9692,7 +10379,12 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
switch (attrs->flavour) {
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
- n = snprintf(name, len, "p%u", attrs->phys.port_number);
+ if (devlink_port->linecard)
+ n = snprintf(name, len, "l%u",
+ devlink_port->linecard->index);
+ if (n < len)
+ n += snprintf(name + n, len - n, "p%u",
+ attrs->phys.port_number);
if (n < len && attrs->split)
n += snprintf(name + n, len - n, "s%u",
attrs->phys.split_subport_number);
@@ -9747,6 +10439,256 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
return 0;
}
+static int devlink_linecard_types_init(struct devlink_linecard *linecard)
+{
+ struct devlink_linecard_type *linecard_type;
+ unsigned int count;
+ int i;
+
+ count = linecard->ops->types_count(linecard, linecard->priv);
+ linecard->types = kmalloc_array(count, sizeof(*linecard_type),
+ GFP_KERNEL);
+ if (!linecard->types)
+ return -ENOMEM;
+ linecard->types_count = count;
+
+ for (i = 0; i < count; i++) {
+ linecard_type = &linecard->types[i];
+ linecard->ops->types_get(linecard, linecard->priv, i,
+ &linecard_type->type,
+ &linecard_type->priv);
+ }
+ return 0;
+}
+
+static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
+{
+ kfree(linecard->types);
+}
+
+/**
+ * devlink_linecard_create - Create devlink linecard
+ *
+ * @devlink: devlink
+ * @linecard_index: driver-specific numerical identifier of the linecard
+ * @ops: linecard ops
+ * @priv: user priv pointer
+ *
+ * Create a devlink linecard instance with the provided linecard index.
+ * The caller can use any indexing, even a hw-related one.
+ *
+ * Return: Line card structure or an ERR_PTR() encoded error code.
+ */
+struct devlink_linecard *
+devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv)
+{
+ struct devlink_linecard *linecard;
+ int err;
+
+ if (WARN_ON(!ops || !ops->provision || !ops->unprovision ||
+ !ops->types_count || !ops->types_get))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&devlink->linecards_lock);
+ if (devlink_linecard_index_exists(devlink, linecard_index)) {
+ mutex_unlock(&devlink->linecards_lock);
+ return ERR_PTR(-EEXIST);
+ }
+
+ linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
+ if (!linecard) {
+ mutex_unlock(&devlink->linecards_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ linecard->devlink = devlink;
+ linecard->index = linecard_index;
+ linecard->ops = ops;
+ linecard->priv = priv;
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ mutex_init(&linecard->state_lock);
+ INIT_LIST_HEAD(&linecard->device_list);
+
+ err = devlink_linecard_types_init(linecard);
+ if (err) {
+ mutex_destroy(&linecard->state_lock);
+ kfree(linecard);
+ mutex_unlock(&devlink->linecards_lock);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&linecard->list, &devlink->linecard_list);
+ refcount_set(&linecard->refcount, 1);
+ mutex_unlock(&devlink->linecards_lock);
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ return linecard;
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_create);
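/* Driver-side sketch of the creation API (illustrative, hypothetical names;
 * not part of this patch). A driver implements at least the mandatory ops
 * checked by the WARN_ON() above and creates one line card object per slot,
 * typically while setting up its devlink instance:
 */
static int example_lc_provision(struct devlink_linecard *linecard, void *priv,
				const char *type, const void *type_priv,
				struct netlink_ext_ack *extack)
{
	/* Start (possibly asynchronous) provisioning of "type"; report the
	 * outcome via devlink_linecard_provision_set() or _fail().
	 */
	return 0;
}

static int example_lc_unprovision(struct devlink_linecard *linecard,
				  void *priv, struct netlink_ext_ack *extack)
{
	/* Tear down; call devlink_linecard_provision_clear() when finished. */
	return 0;
}

static unsigned int example_lc_types_count(struct devlink_linecard *linecard,
					   void *priv)
{
	return 1;
}

static void example_lc_types_get(struct devlink_linecard *linecard, void *priv,
				 unsigned int index, const char **type,
				 const void **type_priv)
{
	*type = "16x100G";	/* hypothetical type string */
	*type_priv = NULL;
}

static const struct devlink_linecard_ops example_lc_ops = {
	.provision	= example_lc_provision,
	.unprovision	= example_lc_unprovision,
	.types_count	= example_lc_types_count,
	.types_get	= example_lc_types_get,
};

/* Per slot, during init:
 *	linecard = devlink_linecard_create(devlink, slot_index,
 *					   &example_lc_ops, slot_priv);
 *	if (IS_ERR(linecard))
 *		return PTR_ERR(linecard);
 * and devlink_linecard_destroy(linecard) on teardown.
 */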
+
+/**
+ * devlink_linecard_destroy - Destroy devlink linecard
+ *
+ * @linecard: devlink linecard
+ */
+void devlink_linecard_destroy(struct devlink_linecard *linecard)
+{
+ struct devlink *devlink = linecard->devlink;
+
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
+ WARN_ON(!list_empty(&linecard->device_list));
+ mutex_lock(&devlink->linecards_lock);
+ list_del(&linecard->list);
+ devlink_linecard_types_fini(linecard);
+ mutex_unlock(&devlink->linecards_lock);
+ devlink_linecard_put(linecard);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_destroy);
+
+/**
+ * devlink_linecard_device_create - Create a device on linecard
+ *
+ * @linecard: devlink linecard
+ * @device_index: index of the linecard device
+ * @priv: user priv pointer
+ *
+ * Return: Line card device structure or an ERR_PTR() encoded error code.
+ */
+struct devlink_linecard_device *
+devlink_linecard_device_create(struct devlink_linecard *linecard,
+ unsigned int device_index, void *priv)
+{
+ struct devlink_linecard_device *linecard_device;
+
+ linecard_device = kzalloc(sizeof(*linecard_device), GFP_KERNEL);
+ if (!linecard_device)
+ return ERR_PTR(-ENOMEM);
+ linecard_device->index = device_index;
+ linecard_device->priv = priv;
+ mutex_lock(&linecard->state_lock);
+ list_add_tail(&linecard_device->list, &linecard->device_list);
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ return linecard_device;
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_device_create);
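/* Sketch of the intended use (hypothetical driver code; not part of this
 * patch): once a line card is provisioned, the driver exposes its discrete
 * components so they appear in the nested DEVLINK_ATTR_LINECARD_DEVICE_LIST
 * of the get/info replies:
 *
 *	device = devlink_linecard_device_create(linecard, device_index,
 *						 device_priv);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *
 * The devices must be removed with devlink_linecard_device_destroy() before
 * unprovisioning completes, since devlink_linecard_provision_clear() warns
 * on a non-empty device list.
 */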
+
+/**
+ * devlink_linecard_device_destroy - Destroy device on linecard
+ *
+ * @linecard: devlink linecard
+ * @linecard_device: devlink linecard device
+ */
+void
+devlink_linecard_device_destroy(struct devlink_linecard *linecard,
+ struct devlink_linecard_device *linecard_device)
+{
+ mutex_lock(&linecard->state_lock);
+ list_del(&linecard_device->list);
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ kfree(linecard_device);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_device_destroy);
+
+/**
+ * devlink_linecard_provision_set - Set provisioning on linecard
+ *
+ * @linecard: devlink linecard
+ * @type: linecard type
+ *
+ * This is called either directly from within the provision() op or
+ * asynchronously later, as a result of the provision() op call.
+ */
+void devlink_linecard_provision_set(struct devlink_linecard *linecard,
+ const char *type)
+{
+ mutex_lock(&linecard->state_lock);
+ WARN_ON(linecard->type && strcmp(linecard->type, type));
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
+ linecard->type = type;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_provision_set);
+
+/**
+ * devlink_linecard_provision_clear - Clear provisioning on linecard
+ *
+ * @linecard: devlink linecard
+ *
+ * This is called either directly from within the unprovision() op or
+ * asynchronously later, as a result of the unprovision() op call.
+ */
+void devlink_linecard_provision_clear(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ WARN_ON(!list_empty(&linecard->device_list));
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear);
+
+/**
+ * devlink_linecard_provision_fail - Fail provisioning on linecard
+ *
+ * @linecard: devlink linecard
+ *
+ * This is called either directly from within the provision() op or
+ * asynchronously later, as a result of the provision() op call.
+ */
+void devlink_linecard_provision_fail(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail);
+
+/**
+ * devlink_linecard_activate - Set linecard active
+ *
+ * @linecard: devlink linecard
+ */
+void devlink_linecard_activate(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED);
+ linecard->state = DEVLINK_LINECARD_STATE_ACTIVE;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_activate);
+
+/**
+ * devlink_linecard_deactivate - Set linecard inactive
+ *
+ * @linecard: devlink linecard
+ */
+void devlink_linecard_deactivate(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ switch (linecard->state) {
+ case DEVLINK_LINECARD_STATE_ACTIVE:
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ break;
+ case DEVLINK_LINECARD_STATE_UNPROVISIONING:
+ /* Line card is being deactivated as part
+ * of the unprovisioning flow.
+ */
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
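/* Putting the state helpers together (illustrative, hypothetical driver
 * event handling; not part of this patch). The transitions driven by the
 * ops above and by hardware events are:
 *
 *   UNPROVISIONED        --provision op-->                       PROVISIONING
 *   PROVISIONING         --devlink_linecard_provision_set()-->   PROVISIONED
 *   PROVISIONING         --devlink_linecard_provision_fail()-->  PROVISIONING_FAILED
 *   PROVISIONED          --devlink_linecard_activate()-->        ACTIVE
 *   ACTIVE               --devlink_linecard_deactivate()-->      PROVISIONED
 *   PROVISIONED/ACTIVE   --unprovision op-->                     UNPROVISIONING
 *   UNPROVISIONING       --devlink_linecard_provision_clear()--> UNPROVISIONED
 */
static void example_lc_got_active(struct devlink_linecard *linecard)
{
	/* e.g. called when the line card's ports come up */
	devlink_linecard_activate(linecard);
}

static void example_lc_got_inactive(struct devlink_linecard *linecard)
{
	/* e.g. called when the line card's ports go down */
	devlink_linecard_deactivate(linecard);
}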
+
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
diff --git a/net/core/filter.c b/net/core/filter.c
index 64470a727ef7..b741b9f7e6a9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
return -EINVAL;
- if (unlikely(offset > 0xffff))
+ if (unlikely(offset > INT_MAX))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + len)))
return -EFAULT;
@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
{
void *ptr;
- if (unlikely(offset > 0xffff))
+ if (unlikely(offset > INT_MAX))
goto err_clear;
ptr = skb_header_pointer(skb, offset, len, to);
@@ -5173,7 +5173,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
if (val <= 0 || tp->data_segs_out > tp->syn_data)
ret = -EINVAL;
else
- tp->snd_cwnd = val;
+ tcp_snd_cwnd_set(tp, val);
break;
case TCP_BPF_SNDCWND_CLAMP:
if (val <= 0) {
@@ -6621,7 +6621,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
.func = bpf_sk_release,
.gpl_only = false,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON | OBJ_RELEASE,
};
BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
@@ -7099,7 +7099,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
*/
switch (((struct iphdr *)iph)->version) {
case 4:
- if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
+ if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk))
return -EINVAL;
mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 6f7ec72016dc..6aee04f75e3e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1035,6 +1035,16 @@ bool __skb_flow_dissect(const struct net *net,
memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
}
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
+ struct flow_dissector_key_num_of_vlans *key_num_of_vlans;
+
+ key_num_of_vlans = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
+ target_container);
+ key_num_of_vlans->num_of_vlans = 0;
+ }
+
proto_again:
fdret = FLOW_DISSECT_RET_CONTINUE;
@@ -1158,6 +1168,16 @@ proto_again:
nhoff += sizeof(*vlan);
}
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
+ struct flow_dissector_key_num_of_vlans *key_nvs;
+
+ key_nvs = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
+ target_container);
+ key_nvs->num_of_vlans++;
+ }
+
if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
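/* Consumer-side sketch (hypothetical dissector user; not part of this patch):
 * a classifier opts into the new count by listing the key when initializing
 * its flow_dissector and providing room for it in its key container.
 */
struct example_flow_key {
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	/* ... other keys ... */
};

static const struct flow_dissector_key example_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
		.offset = offsetof(struct example_flow_key, num_of_vlans),
	},
};

/* After skb_flow_dissector_init(&dissector, example_keys,
 * ARRAY_SIZE(example_keys)) and a __skb_flow_dissect() call,
 * example_flow_key.num_of_vlans.num_of_vlans holds the number of VLAN
 * headers that were parsed (initialized to 0 above, incremented per tag).
 */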
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 95098d1a49bd..a244d3bade7d 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -18,6 +18,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
+#include "dev.h"
enum lw_bits {
LW_URGENT = 0,
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f64ebd050f6c..47b6c1f0fdbb 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3728,7 +3728,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
char *p_name;
- t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
+ t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
if (!t)
goto err;
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 88cc0ad7d386..1ec23bf8b05c 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -4,6 +4,8 @@
#include <linux/seq_file.h>
#include <net/wext.h>
+#include "dev.h"
+
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
#define get_bucket(x) ((x) >> BUCKET_SPACE)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9cbc1c8289bc..4980c3a50475 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
#include <linux/of_net.h>
#include <linux/cpu.h>
+#include "dev.h"
#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 1943c0f0307d..bdbadfaee867 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -18,6 +18,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */
#include <linux/poison.h>
+#include <linux/ethtool.h>
#include <trace/events/page_pool.h>
@@ -36,6 +37,26 @@
this_cpu_inc(s->__stat); \
} while (0)
+#define recycle_stat_add(pool, __stat, val) \
+ do { \
+ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+ this_cpu_add(s->__stat, val); \
+ } while (0)
+
+static const char pp_stats[][ETH_GSTRING_LEN] = {
+ "rx_pp_alloc_fast",
+ "rx_pp_alloc_slow",
+ "rx_pp_alloc_slow_ho",
+ "rx_pp_alloc_empty",
+ "rx_pp_alloc_refill",
+ "rx_pp_alloc_waive",
+ "rx_pp_recycle_cached",
+ "rx_pp_recycle_cache_full",
+ "rx_pp_recycle_ring",
+ "rx_pp_recycle_ring_full",
+ "rx_pp_recycle_released_ref",
+};
+
bool page_pool_get_stats(struct page_pool *pool,
struct page_pool_stats *stats)
{
@@ -44,7 +65,13 @@ bool page_pool_get_stats(struct page_pool *pool,
if (!stats)
return false;
- memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats));
+ /* The caller is responsible for initializing stats. */
+ stats->alloc_stats.fast += pool->alloc_stats.fast;
+ stats->alloc_stats.slow += pool->alloc_stats.slow;
+ stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
+ stats->alloc_stats.empty += pool->alloc_stats.empty;
+ stats->alloc_stats.refill += pool->alloc_stats.refill;
+ stats->alloc_stats.waive += pool->alloc_stats.waive;
for_each_possible_cpu(cpu) {
const struct page_pool_recycle_stats *pcpu =
@@ -60,9 +87,50 @@ bool page_pool_get_stats(struct page_pool *pool,
return true;
}
EXPORT_SYMBOL(page_pool_get_stats);
+
+u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
+ memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ return data;
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
+
+int page_pool_ethtool_stats_get_count(void)
+{
+ return ARRAY_SIZE(pp_stats);
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
+
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ struct page_pool_stats *pool_stats = stats;
+
+ *data++ = pool_stats->alloc_stats.fast;
+ *data++ = pool_stats->alloc_stats.slow;
+ *data++ = pool_stats->alloc_stats.slow_high_order;
+ *data++ = pool_stats->alloc_stats.empty;
+ *data++ = pool_stats->alloc_stats.refill;
+ *data++ = pool_stats->alloc_stats.waive;
+ *data++ = pool_stats->recycle_stats.cached;
+ *data++ = pool_stats->recycle_stats.cache_full;
+ *data++ = pool_stats->recycle_stats.ring;
+ *data++ = pool_stats->recycle_stats.ring_full;
+ *data++ = pool_stats->recycle_stats.released_refcnt;
+
+ return data;
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get);
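/* Typical consumer of the three helpers above (sketch with hypothetical
 * driver helpers; not part of this patch): a driver with one page_pool per
 * RX queue folds the pool statistics into its ethtool -S output. Note that
 * page_pool_get_stats() now accumulates, so the stats struct is zeroed once
 * and then filled across all pools.
 */
static void example_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		data = page_pool_ethtool_stats_get_strings(data);
	/* driver-specific strings would follow here */
}

static int example_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return page_pool_ethtool_stats_get_count();
	return -EOPNOTSUPP;
}

static void example_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct page_pool_stats pp_stats = {};
	int i;

	for (i = 0; i < example_num_rxqs(dev); i++)	/* hypothetical helper */
		page_pool_get_stats(example_rxq_pool(dev, i), &pp_stats);
	data = page_pool_ethtool_stats_get(data, &pp_stats);
	/* driver-specific counters would follow here */
}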
+
#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
+#define recycle_stat_add(pool, __stat, val)
#endif
static int page_pool_init(struct page_pool *pool,
@@ -566,9 +634,13 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
/* Bulk producer into ptr_ring page_pool cache */
page_pool_ring_lock(pool);
for (i = 0; i < bulk_len; i++) {
- if (__ptr_ring_produce(&pool->ring, data[i]))
- break; /* ring full */
+ if (__ptr_ring_produce(&pool->ring, data[i])) {
+ /* ring full */
+ recycle_stat_inc(pool, ring_full);
+ break;
+ }
}
+ recycle_stat_add(pool, ring, i);
page_pool_ring_unlock(pool);
	/* Hopefully all pages were returned into the ptr_ring */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d1381ea6d52e..eea5ed09e1bb 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -54,6 +54,8 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
+#include "dev.h"
+
#define RTNL_MAX_TYPE 50
#define RTNL_SLAVE_MAX_TYPE 40
@@ -95,6 +97,39 @@ void __rtnl_unlock(void)
defer_kfree_skb_list = NULL;
+ /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
+ * is used. In some places, e.g. in cfg80211, we have code that will do
+ * something like
+ * rtnl_lock()
+ * wiphy_lock()
+ * ...
+ * rtnl_unlock()
+ *
+ * and because netdev_run_todo() acquires the RTNL for items on the list
+ * we could cause a situation such as this:
+ * Thread 1 Thread 2
+ * rtnl_lock()
+ * unregister_netdevice()
+ * __rtnl_unlock()
+ * rtnl_lock()
+ * wiphy_lock()
+ * rtnl_unlock()
+ * netdev_run_todo()
+ * __rtnl_unlock()
+ *
+ * // list not empty now
+ * // because of thread 2
+ * rtnl_lock()
+ * while (!list_empty(...))
+ * rtnl_lock()
+ * wiphy_lock()
+ * **** DEADLOCK ****
+ *
+ * However, usage of __rtnl_unlock() is rare, and so we can ensure that
+ * it's not used in cases where something is added to the todo list.
+ */
+ WARN_ON(!list_empty(&net_todo_list));
+
mutex_unlock(&rtnl_mutex);
while (head) {
@@ -214,6 +249,8 @@ static int rtnl_register_internal(struct module *owner,
if (dumpit)
link->dumpit = dumpit;
+ WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
+ (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
link->flags |= flags;
/* publish protocol:msgtype */
@@ -2607,17 +2644,23 @@ static int do_set_proto_down(struct net_device *dev,
static int do_setlink(const struct sk_buff *skb,
struct net_device *dev, struct ifinfomsg *ifm,
struct netlink_ext_ack *extack,
- struct nlattr **tb, char *ifname, int status)
+ struct nlattr **tb, int status)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ char ifname[IFNAMSIZ];
int err;
err = validate_linkmsg(dev, tb, extack);
if (err < 0)
return err;
+ if (tb[IFLA_IFNAME])
+ nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ else
+ ifname[0] = '\0';
+
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
- const char *pat = ifname && ifname[0] ? ifname : NULL;
+ const char *pat = ifname[0] ? ifname : NULL;
struct net *net;
int new_ifindex;
@@ -2973,21 +3016,16 @@ errout:
}
static struct net_device *rtnl_dev_get(struct net *net,
- struct nlattr *ifname_attr,
- struct nlattr *altifname_attr,
- char *ifname)
-{
- char buffer[ALTIFNAMSIZ];
-
- if (!ifname) {
- ifname = buffer;
- if (ifname_attr)
- nla_strscpy(ifname, ifname_attr, IFNAMSIZ);
- else if (altifname_attr)
- nla_strscpy(ifname, altifname_attr, ALTIFNAMSIZ);
- else
- return NULL;
- }
+ struct nlattr *tb[])
+{
+ char ifname[ALTIFNAMSIZ];
+
+ if (tb[IFLA_IFNAME])
+ nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ else if (tb[IFLA_ALT_IFNAME])
+ nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
+ else
+ return NULL;
return __dev_get_by_name(net, ifname);
}
@@ -3000,7 +3038,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device *dev;
int err;
struct nlattr *tb[IFLA_MAX+1];
- char ifname[IFNAMSIZ];
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
ifla_policy, extack);
@@ -3011,17 +3048,12 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
goto errout;
- if (tb[IFLA_IFNAME])
- nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
- else
- ifname[0] = '\0';
-
err = -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
+ dev = rtnl_dev_get(net, tb);
else
goto errout;
@@ -3030,7 +3062,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout;
}
- err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
+ err = do_setlink(skb, dev, ifm, extack, tb, 0);
errout:
return err;
}
@@ -3119,15 +3151,14 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
- tb[IFLA_ALT_IFNAME], NULL);
+ dev = rtnl_dev_get(net, tb);
else if (tb[IFLA_GROUP])
err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
else
goto out;
if (!dev) {
- if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
+ if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
err = -ENODEV;
goto out;
@@ -3262,7 +3293,7 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
for_each_netdev_safe(net, dev, aux) {
if (dev->group == group) {
- err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
+ err = do_setlink(skb, dev, ifm, extack, tb, 0);
if (err < 0)
return err;
}
@@ -3271,24 +3302,118 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
return 0;
}
-static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attr, struct netlink_ext_ack *extack)
+static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
+ const struct rtnl_link_ops *ops,
+ struct nlattr **tb, struct nlattr **data,
+ struct netlink_ext_ack *extack)
{
- struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
unsigned char name_assign_type = NET_NAME_USER;
+ struct net *net = sock_net(skb->sk);
+ struct net *dest_net, *link_net;
+ struct net_device *dev;
+ char ifname[IFNAMSIZ];
+ int err;
+
+ if (!ops->alloc && !ops->setup)
+ return -EOPNOTSUPP;
+
+ if (tb[IFLA_IFNAME]) {
+ nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ } else {
+ snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
+ name_assign_type = NET_NAME_ENUM;
+ }
+
+ dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+
+ if (tb[IFLA_LINK_NETNSID]) {
+ int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
+
+ link_net = get_net_ns_by_id(dest_net, id);
+ if (!link_net) {
+ NL_SET_ERR_MSG(extack, "Unknown network namespace id");
+ err = -EINVAL;
+ goto out;
+ }
+ err = -EPERM;
+ if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
+ goto out;
+ } else {
+ link_net = NULL;
+ }
+
+ dev = rtnl_create_link(link_net ? : dest_net, ifname,
+ name_assign_type, ops, tb, extack);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out;
+ }
+
+ dev->ifindex = ifm->ifi_index;
+
+ if (ops->newlink)
+ err = ops->newlink(link_net ? : net, dev, tb, data, extack);
+ else
+ err = register_netdevice(dev);
+ if (err < 0) {
+ free_netdev(dev);
+ goto out;
+ }
+
+ err = rtnl_configure_link(dev, ifm);
+ if (err < 0)
+ goto out_unregister;
+ if (link_net) {
+ err = dev_change_net_namespace(dev, dest_net, ifname);
+ if (err < 0)
+ goto out_unregister;
+ }
+ if (tb[IFLA_MASTER]) {
+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
+ if (err)
+ goto out_unregister;
+ }
+out:
+ if (link_net)
+ put_net(link_net);
+ put_net(dest_net);
+ return err;
+out_unregister:
+ if (ops->newlink) {
+ LIST_HEAD(list_kill);
+
+ ops->dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ } else {
+ unregister_netdevice(dev);
+ }
+ goto out;
+}
+
+struct rtnl_newlink_tbs {
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct nlattr *attr[RTNL_MAX_TYPE + 1];
+ struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
+};
+
+static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct rtnl_newlink_tbs *tbs,
+ struct netlink_ext_ack *extack)
+{
struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
+ struct nlattr ** const tb = tbs->tb;
const struct rtnl_link_ops *m_ops;
struct net_device *master_dev;
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
- struct nlattr *tb[IFLA_MAX + 1];
- struct net *dest_net, *link_net;
struct nlattr **slave_data;
char kind[MODULE_NAME_LEN];
struct net_device *dev;
struct ifinfomsg *ifm;
- char ifname[IFNAMSIZ];
struct nlattr **data;
+ bool link_specified;
int err;
#ifdef CONFIG_MODULES
@@ -3303,18 +3428,17 @@ replay:
if (err < 0)
return err;
- if (tb[IFLA_IFNAME])
- nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
- else
- ifname[0] = '\0';
-
ifm = nlmsg_data(nlh);
- if (ifm->ifi_index > 0)
+ if (ifm->ifi_index > 0) {
+ link_specified = true;
dev = __dev_get_by_index(net, ifm->ifi_index);
- else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
- else
+ } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
+ link_specified = true;
+ dev = rtnl_dev_get(net, tb);
+ } else {
+ link_specified = false;
dev = NULL;
+ }
master_dev = NULL;
m_ops = NULL;
@@ -3351,12 +3475,12 @@ replay:
return -EINVAL;
if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
- err = nla_parse_nested_deprecated(attr, ops->maxtype,
+ err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
linkinfo[IFLA_INFO_DATA],
ops->policy, extack);
if (err < 0)
return err;
- data = attr;
+ data = tbs->attr;
}
if (ops->validate) {
err = ops->validate(tb, data, extack);
@@ -3372,14 +3496,14 @@ replay:
if (m_ops->slave_maxtype &&
linkinfo[IFLA_INFO_SLAVE_DATA]) {
- err = nla_parse_nested_deprecated(slave_attr,
+ err = nla_parse_nested_deprecated(tbs->slave_attr,
m_ops->slave_maxtype,
linkinfo[IFLA_INFO_SLAVE_DATA],
m_ops->slave_policy,
extack);
if (err < 0)
return err;
- slave_data = slave_attr;
+ slave_data = tbs->slave_attr;
}
}
@@ -3413,11 +3537,16 @@ replay:
status |= DO_SETLINK_NOTIFY;
}
- return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
+ return do_setlink(skb, dev, ifm, extack, tb, status);
}
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
- if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+ /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
+ * or it's for a group
+ */
+ if (link_specified)
+ return -ENODEV;
+ if (tb[IFLA_GROUP])
return rtnl_group_changelink(skb, net,
nla_get_u32(tb[IFLA_GROUP]),
ifm, extack, tb);
@@ -3442,94 +3571,21 @@ replay:
return -EOPNOTSUPP;
}
- if (!ops->alloc && !ops->setup)
- return -EOPNOTSUPP;
-
- if (!ifname[0]) {
- snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
- name_assign_type = NET_NAME_ENUM;
- }
-
- dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
-
- if (tb[IFLA_LINK_NETNSID]) {
- int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
-
- link_net = get_net_ns_by_id(dest_net, id);
- if (!link_net) {
- NL_SET_ERR_MSG(extack, "Unknown network namespace id");
- err = -EINVAL;
- goto out;
- }
- err = -EPERM;
- if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
- goto out;
- } else {
- link_net = NULL;
- }
-
- dev = rtnl_create_link(link_net ? : dest_net, ifname,
- name_assign_type, ops, tb, extack);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
- goto out;
- }
-
- dev->ifindex = ifm->ifi_index;
-
- if (ops->newlink)
- err = ops->newlink(link_net ? : net, dev, tb, data, extack);
- else
- err = register_netdevice(dev);
- if (err < 0) {
- free_netdev(dev);
- goto out;
- }
-
- err = rtnl_configure_link(dev, ifm);
- if (err < 0)
- goto out_unregister;
- if (link_net) {
- err = dev_change_net_namespace(dev, dest_net, ifname);
- if (err < 0)
- goto out_unregister;
- }
- if (tb[IFLA_MASTER]) {
- err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
- if (err)
- goto out_unregister;
- }
-out:
- if (link_net)
- put_net(link_net);
- put_net(dest_net);
- return err;
-out_unregister:
- if (ops->newlink) {
- LIST_HEAD(list_kill);
-
- ops->dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
- } else {
- unregister_netdevice(dev);
- }
- goto out;
+ return rtnl_newlink_create(skb, ifm, ops, tb, data, extack);
}
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
- struct nlattr **attr;
+ struct rtnl_newlink_tbs *tbs;
int ret;
- attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL);
- if (!attr)
+ tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
+ if (!tbs)
return -ENOMEM;
- ret = __rtnl_newlink(skb, nlh, attr, extack);
- kfree(attr);
+ ret = __rtnl_newlink(skb, nlh, tbs, extack);
+ kfree(tbs);
return ret;
}
@@ -3617,8 +3673,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(tgt_net, tb[IFLA_IFNAME],
- tb[IFLA_ALT_IFNAME], NULL);
+ dev = rtnl_dev_get(tgt_net, tb);
else
goto out;
@@ -3713,8 +3768,7 @@ static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
- tb[IFLA_ALT_IFNAME], NULL);
+ dev = rtnl_dev_get(net, tb);
else
return -EINVAL;
@@ -4132,22 +4186,36 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);
+static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
+ [NDA_VLAN] = { .type = NLA_U16 },
+ [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
+ [NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
+ [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
+};
+
static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
struct net *net = sock_net(skb->sk);
+ const struct net_device_ops *ops;
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
- __u8 *addr;
+ __u8 *addr = NULL;
int err;
u16 vid;
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
- extack);
+ if (!del_bulk) {
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
+ NULL, extack);
+ } else {
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
+ fdb_del_bulk_policy, extack);
+ }
if (err < 0)
return err;
@@ -4163,9 +4231,12 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -ENODEV;
}
- if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
- NL_SET_ERR_MSG(extack, "invalid address");
- return -EINVAL;
+ if (!del_bulk) {
+ if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+ NL_SET_ERR_MSG(extack, "invalid address");
+ return -EINVAL;
+ }
+ addr = nla_data(tb[NDA_LLADDR]);
}
if (dev->type != ARPHRD_ETHER) {
@@ -4173,8 +4244,6 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
- addr = nla_data(tb[NDA_LLADDR]);
-
err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
if (err)
return err;
@@ -4185,10 +4254,16 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
netif_is_bridge_port(dev)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
- const struct net_device_ops *ops = br_dev->netdev_ops;
- if (ops->ndo_fdb_del)
- err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
+ ops = br_dev->netdev_ops;
+ if (!del_bulk) {
+ if (ops->ndo_fdb_del)
+ err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
+ } else {
+ if (ops->ndo_fdb_del_bulk)
+ err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
+ extack);
+ }
if (err)
goto out;
@@ -4198,15 +4273,24 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Embedded bridge, macvlan, and any other device support */
if (ndm->ndm_flags & NTF_SELF) {
- if (dev->netdev_ops->ndo_fdb_del)
- err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
- vid);
- else
- err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
+ ops = dev->netdev_ops;
+ if (!del_bulk) {
+ if (ops->ndo_fdb_del)
+ err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
+ else
+ err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
+ } else {
+ /* in case err was cleared by NTF_MASTER call */
+ err = -EOPNOTSUPP;
+ if (ops->ndo_fdb_del_bulk)
+ err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
+ extack);
+ }
if (!err) {
- rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
- ndm->ndm_state);
+ if (!del_bulk)
+ rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
+ ndm->ndm_state);
ndm->ndm_flags &= ~NTF_SELF;
}
}
@@ -5896,11 +5980,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct net *net = sock_net(skb->sk);
struct rtnl_link *link;
+ enum rtnl_kinds kind;
struct module *owner;
int err = -EOPNOTSUPP;
rtnl_doit_func doit;
unsigned int flags;
- int kind;
int family;
int type;
@@ -5915,13 +5999,13 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
- kind = type&3;
+ kind = rtnl_msgtype_kind(type);
- if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
+ if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
return -EPERM;
rcu_read_lock();
- if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+ if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
struct sock *rtnl;
rtnl_dumpit_func dumpit;
u32 min_dump_alloc = 0;
@@ -5977,6 +6061,12 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
flags = link->flags;
+ if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
+ !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
+ NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
+ goto err_unlock;
+ }
+
if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
doit = link->doit;
rcu_read_unlock();
@@ -6105,7 +6195,8 @@ void __init rtnetlink_init(void)
rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
+ rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
+ RTNL_FLAG_BULK_DEL_SUPPORTED);
rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
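/* Driver-side sketch of the new bulk hook (hypothetical stub; the
 * ndo_fdb_del_bulk prototype is inferred from the calls above and is not
 * spelled out in this hunk). A device that can flush matching FDB entries
 * in one operation implements it and thereby accepts RTM_DELNEIGH requests
 * carrying NLM_F_BULK:
 */
static int example_ndo_fdb_del_bulk(struct ndmsg *ndm, struct nlattr *tb[],
				    struct net_device *dev, u16 vid,
				    struct netlink_ext_ack *extack)
{
	/* tb[] was parsed against fdb_del_bulk_policy, so NDA_VLAN,
	 * NDA_IFINDEX, NDA_NDM_STATE_MASK and NDA_NDM_FLAGS_MASK may be
	 * present to narrow the flush.
	 */
	return example_fdb_flush(dev, ndm, tb, vid);	/* hypothetical helper */
}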
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 30b523fa4ad2..475183f37891 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -80,7 +80,6 @@
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
-#include "datagram.h"
#include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init;
@@ -204,7 +203,7 @@ static void __build_skb_around(struct sk_buff *skb, void *data,
skb_set_end_offset(skb, size);
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
-
+ skb->alloc_cpu = raw_smp_processor_id();
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
@@ -1037,6 +1036,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#ifdef CONFIG_NET_RX_BUSY_POLL
CHECK_SKB_FIELD(napi_id);
#endif
+ CHECK_SKB_FIELD(alloc_cpu);
#ifdef CONFIG_XPS
CHECK_SKB_FIELD(sender_cpu);
#endif
@@ -1339,18 +1339,11 @@ void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
-{
- return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
-}
-EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
-
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg)
{
struct ubuf_info *orig_uarg = skb_zcopy(skb);
- struct iov_iter orig_iter = msg->msg_iter;
int err, orig_len = skb->len;
/* An skb can only point to one uarg. This edge case happens when
@@ -1364,7 +1357,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct sock *save_sk = skb->sk;
/* Streams do not free skb on error. Reset to prev state. */
- msg->msg_iter = orig_iter;
+ iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
skb->sk = sk;
___pskb_trim(skb, orig_len);
skb->sk = save_sk;
@@ -5601,7 +5594,7 @@ err_free:
}
EXPORT_SYMBOL(skb_vlan_untag);
-int skb_ensure_writable(struct sk_buff *skb, int write_len)
+int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
{
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
@@ -6486,3 +6479,51 @@ free_now:
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */
+
+/**
+ * skb_attempt_defer_free - queue skb for remote freeing
+ * @skb: buffer
+ *
+ * Put @skb in a per-cpu list, using the cpu which
+ * allocated the skb/pages to reduce false sharing
+ * and memory zone spinlock contention.
+ */
+void skb_attempt_defer_free(struct sk_buff *skb)
+{
+ int cpu = skb->alloc_cpu;
+ struct softnet_data *sd;
+ unsigned long flags;
+ bool kick;
+
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
+ !cpu_online(cpu) ||
+ cpu == raw_smp_processor_id()) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ sd = &per_cpu(softnet_data, cpu);
+ /* We do not send an IPI or any signal.
+ * Remote cpu will eventually call skb_defer_free_flush()
+ */
+ spin_lock_irqsave(&sd->defer_lock, flags);
+ skb->next = sd->defer_list;
+ /* Paired with READ_ONCE() in skb_defer_free_flush() */
+ WRITE_ONCE(sd->defer_list, skb);
+ sd->defer_count++;
+
+ /* kick every time queue length reaches 128.
+ * This should avoid blocking in smp_call_function_single_async().
+ * This condition should hardly be hit under normal conditions,
+ * unless the cpu suddenly stops receiving NIC interrupts.
+ */
+ kick = sd->defer_count == 128;
+
+ spin_unlock_irqrestore(&sd->defer_lock, flags);
+
+ /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
+ * if we are unlucky enough (this seems very unlikely).
+ */
+ if (unlikely(kick))
+ smp_call_function_single_async(cpu, &sd->defer_csd);
+}
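
For context on the defer-free path added above: the flush side that the comment refers to, skb_defer_free_flush(), lives in net/core/dev.c and is not part of this excerpt. The sketch below is a paraphrase of what the destination CPU runs from NET_RX_SOFTIRQ, not a verbatim quote from the series:

	/* Sketch of the consumer side (net/core/dev.c in this series). */
	static void skb_defer_free_flush(struct softnet_data *sd)
	{
		struct sk_buff *skb, *next;
		unsigned long flags;

		/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
		if (!READ_ONCE(sd->defer_list))
			return;

		spin_lock_irqsave(&sd->defer_lock, flags);
		skb = sd->defer_list;
		sd->defer_list = NULL;
		sd->defer_count = 0;
		spin_unlock_irqrestore(&sd->defer_lock, flags);

		while (skb != NULL) {
			next = skb->next;
			__kfree_skb(skb);
			skb = next;
		}
	}

Splicing the whole list out under defer_lock keeps the critical section short; the actual freeing then runs lock-free on the detached chain.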
diff --git a/net/core/sock.c b/net/core/sock.c
index 1180a0cb0110..be20a1af20e5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -141,9 +141,14 @@
#include <linux/ethtool.h>
+#include "dev.h"
+
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
+static void sock_def_write_space_wfree(struct sock *sk);
+static void sock_def_write_space(struct sock *sk);
+
/**
* sk_ns_capable - General socket capability test
* @sk: Socket to use a capability on or through
@@ -503,17 +508,35 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);
-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason)
{
+ enum skb_drop_reason drop_reason;
int err;
err = sk_filter(sk, skb);
- if (err)
- return err;
-
- return __sock_queue_rcv_skb(sk, skb);
+ if (err) {
+ drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ goto out;
+ }
+ err = __sock_queue_rcv_skb(sk, skb);
+ switch (err) {
+ case -ENOMEM:
+ drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
+ break;
+ case -ENOBUFS:
+ drop_reason = SKB_DROP_REASON_PROTO_MEM;
+ break;
+ default:
+ drop_reason = SKB_NOT_DROPPED_YET;
+ break;
+ }
+out:
+ if (reason)
+ *reason = drop_reason;
+ return err;
}
-EXPORT_SYMBOL(sock_queue_rcv_skb);
+EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested, unsigned int trim_cap, bool refcounted)
@@ -1291,6 +1314,9 @@ set_sndbuf:
__sock_set_mark(sk, val);
break;
+ case SO_RCVMARK:
+ sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
+ break;
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
@@ -1717,6 +1743,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_mark;
break;
+ case SO_RCVMARK:
+ v.val = sock_flag(sk, SOCK_RCVMARK);
+ break;
+
case SO_RXQ_OVFL:
v.val = sock_flag(sk, SOCK_RXQ_OVFL);
break;
@@ -2062,9 +2092,6 @@ void sk_destruct(struct sock *sk)
{
bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
- WARN_ON_ONCE(!llist_empty(&sk->defer_list));
- sk_defer_free_flush(sk);
-
if (rcu_access_pointer(sk->sk_reuseport_cb)) {
reuseport_detach_sock(sk);
use_call_rcu = true;
@@ -2300,8 +2327,20 @@ void sock_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
unsigned int len = skb->truesize;
+ bool free;
if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
+ if (sock_flag(sk, SOCK_RCU_FREE) &&
+ sk->sk_write_space == sock_def_write_space) {
+ rcu_read_lock();
+ free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
+ sock_def_write_space_wfree(sk);
+ rcu_read_unlock();
+ if (unlikely(free))
+ __sk_free(sk);
+ return;
+ }
+
/*
* Keep a reference on sk_wmem_alloc, this will be released
* after sk_write_space() call
@@ -2611,13 +2650,6 @@ failure:
}
EXPORT_SYMBOL(sock_alloc_send_pskb);
-struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
- int noblock, int *errcode)
-{
- return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
-}
-EXPORT_SYMBOL(sock_alloc_send_skb);
-
int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
struct sockcm_cookie *sockc)
{
@@ -3174,20 +3206,42 @@ static void sock_def_write_space(struct sock *sk)
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
- if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
+ if (sock_writeable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
EPOLLWRNORM | EPOLLWRBAND);
/* Should agree with poll, otherwise some programs break */
- if (sock_writeable(sk))
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
+/* An optimised version of sock_def_write_space(), should only be called
+ * for SOCK_RCU_FREE sockets under RCU read section and after putting
+ * ->sk_wmem_alloc.
+ */
+static void sock_def_write_space_wfree(struct sock *sk)
+{
+ /* Do not wake up a writer until he can make "significant"
+ * progress. --DaveM
+ */
+ if (sock_writeable(sk)) {
+ struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+ /* rely on refcount_sub from sock_wfree() */
+ smp_mb__after_atomic();
+ if (wq && waitqueue_active(&wq->wait))
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+ EPOLLWRNORM | EPOLLWRBAND);
+
+ /* Should agree with poll, otherwise some programs break */
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ }
+}
+
static void sock_def_destruct(struct sock *sk)
{
}
@@ -3486,8 +3540,7 @@ int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int addr_len = 0;
int err;
- err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, &addr_len);
+ err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
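
The new SO_RCVMARK option above only latches a socket flag; per the sock.h changes elsewhere in this series (not shown here), the mark is then delivered to userspace as an SOL_SOCKET/SO_MARK control message on receive. A minimal userspace sketch, assuming the asm-generic value 75 for SO_RCVMARK when the installed headers predate it (alpha, mips, parisc and sparc use different numbers, see the arch socket.h changes in the diffstat); most error handling is omitted:

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef SO_RCVMARK
	#define SO_RCVMARK 75	/* asm-generic value, assumption for old headers */
	#endif

	int main(void)
	{
		char data[2048], cbuf[CMSG_SPACE(sizeof(unsigned int))];
		struct sockaddr_in addr = { .sin_family = AF_INET,
					    .sin_port = htons(5555) };
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
				      .msg_control = cbuf,
				      .msg_controllen = sizeof(cbuf) };
		int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;
		struct cmsghdr *cm;

		bind(fd, (struct sockaddr *)&addr, sizeof(addr));
		setsockopt(fd, SOL_SOCKET, SO_RCVMARK, &one, sizeof(one));

		if (recvmsg(fd, &msg, 0) < 0)
			return 1;

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SO_MARK) {
				unsigned int mark;

				memcpy(&mark, CMSG_DATA(cm), sizeof(mark));
				printf("received skb mark: %u\n", mark);
			}
		}
		return 0;
	}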
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 2d213c4011db..81d4b4756a02 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -793,7 +793,7 @@ static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
.seq_priv_size = sizeof(struct sock_map_seq_info),
};
-static int sock_map_btf_id;
+BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = sock_map_alloc,
@@ -805,8 +805,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_lookup_elem = sock_map_lookup,
.map_release_uref = sock_map_release_progs,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_stab",
- .map_btf_id = &sock_map_btf_id,
+ .map_btf_id = &sock_map_btf_ids[0],
.iter_seq_info = &sock_map_iter_seq_info,
};
@@ -1385,7 +1384,7 @@ static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
.seq_priv_size = sizeof(struct sock_hash_seq_info),
};
-static int sock_hash_map_btf_id;
+BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
const struct bpf_map_ops sock_hash_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc = sock_hash_alloc,
@@ -1397,8 +1396,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_lookup_elem_sys_only = sock_hash_lookup_sys,
.map_release_uref = sock_hash_release_progs,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "bpf_shtab",
- .map_btf_id = &sock_hash_map_btf_id,
+ .map_btf_id = &sock_hash_map_btf_ids[0],
.iter_seq_info = &sock_hash_iter_seq_info,
};
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7123fe7feeac..195ca5c28771 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -23,13 +23,12 @@
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
-static int two = 2;
-static int three = 3;
+#include "dev.h"
+
static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
-static long long_one __maybe_unused = 1;
static long long_max __maybe_unused = LONG_MAX;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -388,7 +387,7 @@ static struct ctl_table net_core_table[] = {
.extra2 = SYSCTL_ONE,
# else
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
# endif
},
# ifdef CONFIG_HAVE_EBPF_JIT
@@ -399,7 +398,7 @@ static struct ctl_table net_core_table[] = {
.mode = 0600,
.proc_handler = proc_dointvec_minmax_bpf_restricted,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{
.procname = "bpf_jit_kallsyms",
@@ -417,7 +416,7 @@ static struct ctl_table net_core_table[] = {
.maxlen = sizeof(long),
.mode = 0600,
.proc_handler = proc_dolongvec_minmax_bpf_restricted,
- .extra1 = &long_one,
+ .extra1 = SYSCTL_LONG_ONE,
.extra2 = &bpf_jit_limit_max,
},
#endif
@@ -544,7 +543,7 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{
.procname = "devconf_inherit_init_net",
@@ -553,7 +552,7 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &three,
+ .extra2 = SYSCTL_THREE,
},
{
.procname = "high_order_alloc_disable",
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 671c377f0889..7dfc00c9fb32 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -293,8 +293,8 @@ int dccp_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
- int flags, int *addr_len);
+int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len);
void dccp_shutdown(struct sock *sk, int how);
int inet_dccp_listen(struct socket *sock, int backlog);
__poll_t dccp_poll(struct file *file, struct socket *sock,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ae662567a6cb..82696ab86f74 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -76,9 +76,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
orig_dport = usin->sin_port;
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
- IPPROTO_DCCP,
- orig_sport, orig_dport, sk);
+ sk->sk_bound_dev_if, IPPROTO_DCCP, orig_sport,
+ orig_dport, sk);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index eab3bd1ee9a0..4d95b6400915 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -892,7 +892,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
- if (__ipv6_only_sock(sk))
+ if (ipv6_only_sock(sk))
return -ENETUNREACH;
sin.sin_family = AF_INET;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a976b4d29892..58421f94427e 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -791,8 +791,8 @@ out_discard:
EXPORT_SYMBOL_GPL(dccp_sendmsg);
-int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
- int flags, int *addr_len)
+int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len)
{
const struct dccp_hdr *dh;
long timeo;
@@ -804,7 +804,7 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
goto out;
}
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 89c6c86e746f..0c6ae32742ec 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -7,19 +7,10 @@
#include <linux/device.h>
#include <linux/list.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/of.h>
-#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
-#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
-#include <linux/etherdevice.h>
#include "dsa_priv.h"
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 5d3f4a67dce1..7c9abd5a0ab9 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -54,18 +54,15 @@ struct dsa_notifier_ageing_time_info {
/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
+ const struct dsa_port *dp;
struct dsa_bridge bridge;
- int tree_index;
- int sw_index;
- int port;
bool tx_fwd_offload;
struct netlink_ext_ack *extack;
};
/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
- int sw_index;
- int port;
+ const struct dsa_port *dp;
const unsigned char *addr;
u16 vid;
struct dsa_db db;
@@ -81,34 +78,28 @@ struct dsa_notifier_lag_fdb_info {
/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
+ const struct dsa_port *dp;
const struct switchdev_obj_port_mdb *mdb;
- int sw_index;
- int port;
struct dsa_db db;
};
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
+ const struct dsa_port *dp;
struct dsa_lag lag;
- int sw_index;
- int port;
-
struct netdev_lag_upper_info *info;
};
/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
+ const struct dsa_port *dp;
const struct switchdev_obj_port_vlan *vlan;
- int sw_index;
- int port;
struct netlink_ext_ack *extack;
};
/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
- bool targeted_match;
- int sw_index;
- int port;
+ const struct dsa_port *dp;
int mtu;
};
@@ -119,9 +110,7 @@ struct dsa_notifier_tag_proto_info {
/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
- int tree_index;
- int sw_index;
- int port;
+ const struct dsa_port *dp;
u16 vid;
};
@@ -241,8 +230,7 @@ int dsa_port_mst_enable(struct dsa_port *dp, bool on,
struct netlink_ext_ack *extack);
int dsa_port_vlan_msti(struct dsa_port *dp,
const struct switchdev_vlan_msti *msti);
-int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
- bool targeted_match);
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
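
The conversion from (tree_index, sw_index, port) triplets to a single dsa_port pointer is mechanical: whenever a handler still needs the raw indices, it can recover them from the pointer, exactly as the net/dsa/switch.c hunks further down do. A purely illustrative helper (the function name is invented):

	static inline void dsa_notifier_ids(const struct dsa_port *dp,
					    int *tree_index, int *sw_index,
					    int *port)
	{
		*tree_index = dp->ds->dst->index;	/* was info->tree_index */
		*sw_index   = dp->ds->index;		/* was info->sw_index   */
		*port       = dp->index;		/* was info->port       */
	}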
diff --git a/net/dsa/port.c b/net/dsa/port.c
index cdc56ba11f52..48e5a309ca5c 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -242,6 +242,59 @@ void dsa_port_disable(struct dsa_port *dp)
rtnl_unlock();
}
+static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
+ struct dsa_bridge bridge)
+{
+ struct netlink_ext_ack extack = {0};
+ bool change_vlan_filtering = false;
+ struct dsa_switch *ds = dp->ds;
+ bool vlan_filtering;
+ int err;
+
+ if (ds->needs_standalone_vlan_filtering &&
+ !br_vlan_enabled(bridge.dev)) {
+ change_vlan_filtering = true;
+ vlan_filtering = true;
+ } else if (!ds->needs_standalone_vlan_filtering &&
+ br_vlan_enabled(bridge.dev)) {
+ change_vlan_filtering = true;
+ vlan_filtering = false;
+ }
+
+ /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
+ * event for changing vlan_filtering setting upon slave ports leaving
+ * it. That is a good thing, because that lets us handle it and also
+ * handle the case where the switch's vlan_filtering setting is global
+ * (not per port). When that happens, the correct moment to trigger the
+ * vlan_filtering callback is only when the last port leaves the last
+ * VLAN-aware bridge.
+ */
+ if (change_vlan_filtering && ds->vlan_filtering_is_global) {
+ dsa_switch_for_each_port(dp, ds) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br && br_vlan_enabled(br)) {
+ change_vlan_filtering = false;
+ break;
+ }
+ }
+ }
+
+ if (!change_vlan_filtering)
+ return;
+
+ err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
+ if (extack._msg) {
+ dev_err(ds->dev, "port %d: %s\n", dp->index,
+ extack._msg);
+ }
+ if (err && err != -EOPNOTSUPP) {
+ dev_err(ds->dev,
+ "port %d failed to reset VLAN filtering to %d: %pe\n",
+ dp->index, vlan_filtering, ERR_PTR(err));
+ }
+}
+
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
@@ -313,7 +366,8 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
return 0;
}
-static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
+static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
+ struct dsa_bridge bridge)
{
/* Configure the port for standalone mode (no address learning,
* flood everything).
@@ -333,7 +387,7 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
*/
dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
- /* VLAN filtering is handled by dsa_switch_bridge_leave */
+ dsa_port_reset_vlan_filtering(dp, bridge);
/* Ageing time may be global to the switch chip, so don't change it
* here because we have no good reason (or value) to change it to.
@@ -405,9 +459,7 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_bridge_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.extack = extack,
};
struct net_device *dev = dp->slave;
@@ -476,9 +528,7 @@ void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
};
int err;
@@ -501,15 +551,14 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
- dsa_port_switchdev_unsync_attrs(dp);
+ dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}
int dsa_port_lag_change(struct dsa_port *dp,
struct netdev_lag_lower_state_info *linfo)
{
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
};
bool tx_enabled;
@@ -578,8 +627,7 @@ int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.info = uinfo,
};
struct net_device *bridge_dev;
@@ -624,8 +672,7 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
};
int err;
@@ -883,13 +930,10 @@ int dsa_port_vlan_msti(struct dsa_port *dp,
return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}
-int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
- bool targeted_match)
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
struct dsa_notifier_mtu_info info = {
- .sw_index = dp->ds->index,
- .targeted_match = targeted_match,
- .port = dp->index,
+ .dp = dp,
.mtu = new_mtu,
};
@@ -900,8 +944,7 @@ int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = {
@@ -924,8 +967,7 @@ int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = {
@@ -945,8 +987,7 @@ static int dsa_port_host_fdb_add(struct dsa_port *dp,
struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = db,
@@ -997,8 +1038,7 @@ static int dsa_port_host_fdb_del(struct dsa_port *dp,
struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
.db = db,
@@ -1093,8 +1133,7 @@ int dsa_port_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = {
.type = DSA_DB_BRIDGE,
@@ -1112,8 +1151,7 @@ int dsa_port_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = {
.type = DSA_DB_BRIDGE,
@@ -1132,8 +1170,7 @@ static int dsa_port_host_mdb_add(const struct dsa_port *dp,
struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = db,
};
@@ -1177,8 +1214,7 @@ static int dsa_port_host_mdb_del(const struct dsa_port *dp,
struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
.db = db,
};
@@ -1222,8 +1258,7 @@ int dsa_port_vlan_add(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
.extack = extack,
};
@@ -1235,8 +1270,7 @@ int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
};
@@ -1248,8 +1282,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
.extack = extack,
};
@@ -1269,8 +1302,7 @@ int dsa_port_host_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
};
struct dsa_port *cpu_dp = dp->cpu_dp;
@@ -1691,9 +1723,7 @@ void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vid = vid,
};
@@ -1706,9 +1736,7 @@ int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vid = vid,
};
int err;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8022d50584db..5ee0aced9410 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1806,11 +1806,9 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- struct dsa_port *dp_iter;
- struct dsa_port *cpu_dp;
- int port = p->dp->index;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_port *other_dp;
int largest_mtu = 0;
int new_master_mtu;
int old_master_mtu;
@@ -1821,33 +1819,28 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
if (!ds->ops->port_change_mtu)
return -EOPNOTSUPP;
- list_for_each_entry(dp_iter, &ds->dst->ports, list) {
+ dsa_tree_for_each_user_port(other_dp, ds->dst) {
int slave_mtu;
- if (!dsa_port_is_user(dp_iter))
- continue;
-
/* During probe, this function will be called for each slave
* device, while not all of them have been allocated. That's
* ok, it doesn't change what the maximum is, so ignore it.
*/
- if (!dp_iter->slave)
+ if (!other_dp->slave)
continue;
/* Pretend that we already applied the setting, which we
* actually haven't (still haven't done all integrity checks)
*/
- if (dp_iter == dp)
+ if (dp == other_dp)
slave_mtu = new_mtu;
else
- slave_mtu = dp_iter->slave->mtu;
+ slave_mtu = other_dp->slave->mtu;
if (largest_mtu < slave_mtu)
largest_mtu = slave_mtu;
}
- cpu_dp = dsa_to_port(ds, port)->cpu_dp;
-
mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
old_master_mtu = master->mtu;
new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
@@ -1866,15 +1859,14 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
goto out_master_failed;
/* We only need to propagate the MTU of the CPU port to
- * upstream switches, so create a non-targeted notifier which
- * updates all switches.
+ * upstream switches, so emit a notifier which updates them.
*/
- err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
+ err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
if (err)
goto out_cpu_failed;
}
- err = dsa_port_mtu_change(dp, new_mtu, true);
+ err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
if (err)
goto out_port_failed;
@@ -1887,8 +1879,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
out_port_failed:
if (new_master_mtu != old_master_mtu)
dsa_port_mtu_change(cpu_dp, old_master_mtu -
- dsa_tag_protocol_overhead(cpu_dp->tag_ops),
- false);
+ dsa_tag_protocol_overhead(cpu_dp->tag_ops));
out_cpu_failed:
if (new_master_mtu != old_master_mtu)
dev_set_mtu(master, old_master_mtu);
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index d25cd1da3eb3..704975e5c1c2 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -49,19 +49,7 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds,
static bool dsa_port_mtu_match(struct dsa_port *dp,
struct dsa_notifier_mtu_info *info)
{
- if (dp->ds->index == info->sw_index && dp->index == info->port)
- return true;
-
- /* Do not propagate to other switches in the tree if the notifier was
- * targeted for a single switch.
- */
- if (info->targeted_match)
- return false;
-
- if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
- return true;
-
- return false;
+ return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}
static int dsa_switch_mtu(struct dsa_switch *ds,
@@ -88,25 +76,26 @@ static int dsa_switch_mtu(struct dsa_switch *ds,
static int dsa_switch_bridge_join(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
{
- struct dsa_switch_tree *dst = ds->dst;
int err;
- if (dst->index == info->tree_index && ds->index == info->sw_index) {
+ if (info->dp->ds == ds) {
if (!ds->ops->port_bridge_join)
return -EOPNOTSUPP;
- err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
+ err = ds->ops->port_bridge_join(ds, info->dp->index,
+ info->bridge,
&info->tx_fwd_offload,
info->extack);
if (err)
return err;
}
- if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
- ds->ops->crosschip_bridge_join) {
- err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
- info->sw_index,
- info->port, info->bridge,
+ if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
+ err = ds->ops->crosschip_bridge_join(ds,
+ info->dp->ds->dst->index,
+ info->dp->ds->index,
+ info->dp->index,
+ info->bridge,
info->extack);
if (err)
return err;
@@ -115,79 +104,18 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds,
return 0;
}
-static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
- struct dsa_notifier_bridge_info *info)
-{
- struct netlink_ext_ack extack = {0};
- bool change_vlan_filtering = false;
- bool vlan_filtering;
- struct dsa_port *dp;
- int err;
-
- if (ds->needs_standalone_vlan_filtering &&
- !br_vlan_enabled(info->bridge.dev)) {
- change_vlan_filtering = true;
- vlan_filtering = true;
- } else if (!ds->needs_standalone_vlan_filtering &&
- br_vlan_enabled(info->bridge.dev)) {
- change_vlan_filtering = true;
- vlan_filtering = false;
- }
-
- /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
- * event for changing vlan_filtering setting upon slave ports leaving
- * it. That is a good thing, because that lets us handle it and also
- * handle the case where the switch's vlan_filtering setting is global
- * (not per port). When that happens, the correct moment to trigger the
- * vlan_filtering callback is only when the last port leaves the last
- * VLAN-aware bridge.
- */
- if (change_vlan_filtering && ds->vlan_filtering_is_global) {
- dsa_switch_for_each_port(dp, ds) {
- struct net_device *br = dsa_port_bridge_dev_get(dp);
-
- if (br && br_vlan_enabled(br)) {
- change_vlan_filtering = false;
- break;
- }
- }
- }
-
- if (change_vlan_filtering) {
- err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
- vlan_filtering, &extack);
- if (extack._msg)
- dev_err(ds->dev, "port %d: %s\n", info->port,
- extack._msg);
- if (err && err != -EOPNOTSUPP)
- return err;
- }
-
- return 0;
-}
-
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
{
- struct dsa_switch_tree *dst = ds->dst;
- int err;
+ if (info->dp->ds == ds && ds->ops->port_bridge_leave)
+ ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
- if (dst->index == info->tree_index && ds->index == info->sw_index &&
- ds->ops->port_bridge_leave)
- ds->ops->port_bridge_leave(ds, info->port, info->bridge);
-
- if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
- ds->ops->crosschip_bridge_leave)
- ds->ops->crosschip_bridge_leave(ds, info->tree_index,
- info->sw_index, info->port,
+ if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
+ ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
+ info->dp->ds->index,
+ info->dp->index,
info->bridge);
- if (ds->dst->index == info->tree_index && ds->index == info->sw_index) {
- err = dsa_switch_sync_vlan_filtering(ds, info);
- if (err)
- return err;
- }
-
return 0;
}
@@ -196,16 +124,11 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
* emitted and its dedicated CPU port.
*/
static bool dsa_port_host_address_match(struct dsa_port *dp,
- int info_sw_index, int info_port)
+ const struct dsa_port *targeted_dp)
{
- struct dsa_port *targeted_dp, *cpu_dp;
- struct dsa_switch *targeted_ds;
-
- targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
- targeted_dp = dsa_to_port(targeted_ds, info_port);
- cpu_dp = targeted_dp->cpu_dp;
+ struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
- if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
+ if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
cpu_dp->index);
@@ -473,8 +396,7 @@ static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
- if (dsa_port_host_address_match(dp, info->sw_index,
- info->port)) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
info->db);
if (err)
@@ -495,8 +417,7 @@ static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
- if (dsa_port_host_address_match(dp, info->sw_index,
- info->port)) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
info->db);
if (err)
@@ -510,7 +431,7 @@ static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
static int dsa_switch_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_add)
@@ -522,7 +443,7 @@ static int dsa_switch_fdb_add(struct dsa_switch *ds,
static int dsa_switch_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_del)
@@ -570,12 +491,12 @@ static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
static int dsa_switch_lag_change(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_lag_change)
- return ds->ops->port_lag_change(ds, info->port);
+ if (info->dp->ds == ds && ds->ops->port_lag_change)
+ return ds->ops->port_lag_change(ds, info->dp->index);
- if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
- return ds->ops->crosschip_lag_change(ds, info->sw_index,
- info->port);
+ if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
+ return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
+ info->dp->index);
return 0;
}
@@ -583,13 +504,13 @@ static int dsa_switch_lag_change(struct dsa_switch *ds,
static int dsa_switch_lag_join(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_lag_join)
- return ds->ops->port_lag_join(ds, info->port, info->lag,
+ if (info->dp->ds == ds && ds->ops->port_lag_join)
+ return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
info->info);
- if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
- return ds->ops->crosschip_lag_join(ds, info->sw_index,
- info->port, info->lag,
+ if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
+ return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
+ info->dp->index, info->lag,
info->info);
return -EOPNOTSUPP;
@@ -598,12 +519,12 @@ static int dsa_switch_lag_join(struct dsa_switch *ds,
static int dsa_switch_lag_leave(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_lag_leave)
- return ds->ops->port_lag_leave(ds, info->port, info->lag);
+ if (info->dp->ds == ds && ds->ops->port_lag_leave)
+ return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
- if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
- return ds->ops->crosschip_lag_leave(ds, info->sw_index,
- info->port, info->lag);
+ if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
+ return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
+ info->dp->index, info->lag);
return -EOPNOTSUPP;
}
@@ -611,7 +532,7 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
static int dsa_switch_mdb_add(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_add)
@@ -623,7 +544,7 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
static int dsa_switch_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_del)
@@ -642,8 +563,7 @@ static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
- if (dsa_port_host_address_match(dp, info->sw_index,
- info->port)) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
if (err)
break;
@@ -663,8 +583,7 @@ static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
- if (dsa_port_host_address_match(dp, info->sw_index,
- info->port)) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
if (err)
break;
@@ -678,29 +597,18 @@ static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
static bool dsa_port_vlan_match(struct dsa_port *dp,
struct dsa_notifier_vlan_info *info)
{
- if (dp->ds->index == info->sw_index && dp->index == info->port)
- return true;
-
- if (dsa_port_is_dsa(dp))
- return true;
-
- return false;
+ return dsa_port_is_dsa(dp) || dp == info->dp;
}
/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
* (upstream and downstream) of that switch and its upstream switches.
*/
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
- struct dsa_notifier_vlan_info *info)
+ const struct dsa_port *targeted_dp)
{
- struct dsa_port *targeted_dp, *cpu_dp;
- struct dsa_switch *targeted_ds;
-
- targeted_ds = dsa_switch_find(dp->ds->dst->index, info->sw_index);
- targeted_dp = dsa_to_port(targeted_ds, info->port);
- cpu_dp = targeted_dp->cpu_dp;
+ struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
- if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
+ if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
return dsa_port_is_dsa(dp) || dp == cpu_dp;
return false;
@@ -858,7 +766,7 @@ static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
- if (dsa_port_host_vlan_match(dp, info)) {
+ if (dsa_port_host_vlan_match(dp, info->dp)) {
err = dsa_port_do_vlan_add(dp, info->vlan,
info->extack);
if (err)
@@ -879,7 +787,7 @@ static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
- if (dsa_port_host_vlan_match(dp, info)) {
+ if (dsa_port_host_vlan_match(dp, info->dp)) {
err = dsa_port_do_vlan_del(dp, info->vlan);
if (err)
return err;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index a786569203f0..01a427800797 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -196,15 +196,7 @@ static bool
dsa_port_tag_8021q_vlan_match(struct dsa_port *dp,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
- struct dsa_switch *ds = dp->ds;
-
- if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
- return true;
-
- if (ds->dst->index == info->tree_index && ds->index == info->sw_index)
- return dp->index == info->port;
-
- return false;
+ return dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp) || dp == info->dp;
}
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index ebcc812735a4..62b89d6f54fd 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -391,7 +391,7 @@ EXPORT_SYMBOL(ether_setup);
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs)
{
- return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
+ return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_ENUM,
ether_setup, txqs, rxqs);
}
EXPORT_SYMBOL(alloc_etherdev_mqs);
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 0c5210015911..566adf85e658 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -201,6 +201,7 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(400000, CR4, Full),
__DEFINE_LINK_MODE_NAME(100, FX, Half),
__DEFINE_LINK_MODE_NAME(100, FX, Full),
+ __DEFINE_LINK_MODE_NAME(10, T1L, Full),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -236,6 +237,7 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
#define __LINK_MODE_LANES_T1 1
#define __LINK_MODE_LANES_X 1
#define __LINK_MODE_LANES_FX 1
+#define __LINK_MODE_LANES_T1L 1
#define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \
[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \
@@ -349,6 +351,7 @@ const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(400000, CR4, Full),
__DEFINE_LINK_MODE_PARAMS(100, FX, Half),
__DEFINE_LINK_MODE_PARAMS(100, FX, Full),
+ __DEFINE_LINK_MODE_PARAMS(10, T1L, Full),
};
static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
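
__DEFINE_LINK_MODE_NAME(10, T1L, Full) follows the usual expansion, so drivers would advertise ETHTOOL_LINK_MODE_10baseT1L_Full_BIT (bit name inferred from the macro convention, it is not spelled out in this hunk). A hypothetical 10BASE-T1L PHY driver fragment:

	#include <linux/linkmode.h>
	#include <linux/phy.h>

	static int foo_t1l_get_features(struct phy_device *phydev)
	{
		linkmode_zero(phydev->supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
				 phydev->supported);
		return 0;
	}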
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 29d01662a48b..7919ddb2371c 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -363,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_CQE_SIZE + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 9f33c9689b56..fa3ec8d438f7 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -55,7 +55,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _RINGS_TX */
nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */
nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */
- nla_total_size(sizeof(u32)); /* _RINGS_CQE_SIZE */
+ nla_total_size(sizeof(u32)) + /* _RINGS_CQE_SIZE */
+ nla_total_size(sizeof(u8)); /* _RINGS_TX_PUSH */
}
static int rings_fill_reply(struct sk_buff *skb,
@@ -94,7 +95,8 @@ static int rings_fill_reply(struct sk_buff *skb,
(nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
kr->tcp_data_split))) ||
(kr->cqe_size &&
- (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))))
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
+ nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
return -EMSGSIZE;
return 0;
@@ -123,6 +125,7 @@ const struct nla_policy ethnl_rings_set_policy[] = {
[ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 },
[ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1),
+ [ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
};
int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
@@ -149,6 +152,33 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
if (!ops->get_ringparam || !ops->set_ringparam)
goto out_dev;
+ if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
+ ret = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
+ "setting rx buf len not supported");
+ goto out_dev;
+ }
+
+ if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
+ ret = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_CQE_SIZE],
+ "setting cqe size not supported");
+ goto out_dev;
+ }
+
+ if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
+ ret = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_TX_PUSH],
+ "setting tx push not supported");
+ goto out_dev;
+ }
+
rtnl_lock();
ret = ethnl_ops_begin(dev);
if (ret < 0)
@@ -165,6 +195,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
ethnl_update_u32(&kernel_ringparam.cqe_size,
tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
+ ethnl_update_u8(&kernel_ringparam.tx_push,
+ tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
ret = 0;
if (!mod)
goto out_ops;
@@ -187,24 +219,6 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
goto out_ops;
}
- if (kernel_ringparam.rx_buf_len != 0 &&
- !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
- ret = -EOPNOTSUPP;
- NL_SET_ERR_MSG_ATTR(info->extack,
- tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
- "setting rx buf len not supported");
- goto out_ops;
- }
-
- if (kernel_ringparam.cqe_size &&
- !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
- ret = -EOPNOTSUPP;
- NL_SET_ERR_MSG_ATTR(info->extack,
- tb[ETHTOOL_A_RINGS_CQE_SIZE],
- "setting cqe size not supported");
- goto out_ops;
- }
-
ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
&kernel_ringparam, info->extack);
if (ret < 0)
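
On the driver side, opting into the new attribute means setting ETHTOOL_RING_USE_TX_PUSH in ->supported_ring_params and honouring kernel_ethtool_ringparam::tx_push in the ring callbacks. A hypothetical driver hook-up; only the ethtool_ops fields, ETHTOOL_RING_USE_TX_PUSH and the tx_push member come from this series, everything named foo_* is invented:

	struct foo_priv {
		u32 tx_pending;
		u8  tx_push;
	};

	static void foo_get_ringparam(struct net_device *dev,
				      struct ethtool_ringparam *ring,
				      struct kernel_ethtool_ringparam *kernel_ring,
				      struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		ring->tx_max_pending = 4096;
		ring->tx_pending = priv->tx_pending;
		kernel_ring->tx_push = priv->tx_push;
	}

	static int foo_set_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ring,
				     struct kernel_ethtool_ringparam *kernel_ring,
				     struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		priv->tx_pending = ring->tx_pending;
		priv->tx_push = kernel_ring->tx_push;
		/* ...reprogram the TX ring / doorbell mode here... */
		return 0;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH,
		.get_ringparam		= foo_get_ringparam,
		.set_ringparam		= foo_set_ringparam,
	};

Newer versions of the ethtool utility are expected to expose this knob as "ethtool -G <dev> tx-push on|off".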
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 3b2366a88c3c..718fb77bb372 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -308,13 +308,13 @@ out:
}
static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@ -328,7 +328,7 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (err)
goto done;
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
@@ -695,7 +695,7 @@ out:
}
static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
@@ -703,7 +703,7 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
struct dgram_sock *ro = dgram_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@ -718,7 +718,7 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (err)
goto done;
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (saddr) {
/* Clear the implicit padding in struct sockaddr_ieee802154
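
Taken together, the receive-path interface changes in this series look roughly like the sketch below: ->recvmsg() no longer takes a separate 'noblock' argument (blocking is derived from MSG_DONTWAIT in flags), skb_recv_datagram() drops its noblock parameter, and sock_recv_ts_and_drops() is renamed sock_recv_cmsgs(). Illustrative only, not code from the patch:

	static int example_dgram_recvmsg(struct sock *sk, struct msghdr *msg,
					 size_t len, int flags, int *addr_len)
	{
		struct sk_buff *skb;
		size_t copied;
		int err;

		skb = skb_recv_datagram(sk, flags, &err);	/* no 'noblock' arg */
		if (!skb)
			return err;

		copied = min_t(size_t, len, skb->len);
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
		if (!err) {
			sock_recv_cmsgs(msg, sk, skb);	/* was sock_recv_ts_and_drops() */
			err = copied;
		}

		skb_free_datagram(sk, skb);
		return err;
	}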
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 87983e70f03f..e983bb0c5012 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -321,7 +321,6 @@ config NET_UDP_TUNNEL
config NET_FOU
tristate "IP: Foo (IP protocols) over UDP"
- select XFRM
select NET_UDP_TUNNEL
help
Foo over UDP allows any IP protocol to be directly encapsulated
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 72fde2888ad2..93da9f783bec 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -836,7 +836,7 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
EXPORT_SYMBOL(inet_sendpage);
INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
- size_t, int, int, int *));
+ size_t, int, int *));
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
@@ -848,8 +848,7 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
sock_rps_record_flow(sk);
err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
- sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, &addr_len);
+ sk, msg, size, flags, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
@@ -1234,9 +1233,9 @@ static int inet_sk_reselect_saddr(struct sock *sk)
/* Query new route. */
fl4 = &inet->cork.fl.u.ip4;
- rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
- sk->sk_bound_dev_if, sk->sk_protocol,
- inet->inet_sport, inet->inet_dport, sk);
+ rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
+ sk->sk_protocol, inet->inet_sport,
+ inet->inet_dport, sk);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 2d0c05ca9c6f..ab4a5601c82a 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1304,9 +1304,9 @@ static struct packet_type arp_packet_type __read_mostly = {
.func = arp_rcv,
};
+#ifdef CONFIG_PROC_FS
#if IS_ENABLED(CONFIG_AX25)
-/* ------------------------------------------------------------------------ */
/*
* ax25 -> ASCII conversion
*/
@@ -1412,16 +1412,13 @@ static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}
-/* ------------------------------------------------------------------------ */
-
static const struct seq_operations arp_seq_ops = {
.start = arp_seq_start,
.next = neigh_seq_next,
.stop = neigh_seq_stop,
.show = arp_seq_show,
};
-
-/* ------------------------------------------------------------------------ */
+#endif /* CONFIG_PROC_FS */
static int __net_init arp_net_init(struct net *net)
{
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 48f337ccf949..ffd57523331f 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -44,10 +44,9 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
saddr = inet->mc_addr;
}
fl4 = &inet->cork.fl.u.ip4;
- rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr,
- RT_CONN_FLAGS(sk), oif,
- sk->sk_protocol,
- inet->inet_sport, usin->sin_port, sk);
+ rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr, oif,
+ sk->sk_protocol, inet->inet_sport,
+ usin->sin_port, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 53a6b14dc50a..89141ba5e9ee 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2573,7 +2573,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
struct devinet_sysctl_table *t;
char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
- t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
+ t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL_ACCOUNT);
if (!t)
goto out;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index af8209f912ab..f361d3d56be2 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1384,7 +1384,7 @@ static void nl_fib_input(struct sk_buff *skb)
return;
nlh = nlmsg_hdr(skb);
- frn = (struct fib_result_nl *) nlmsg_data(nlh);
+ frn = nlmsg_data(nlh);
nl_fib_lookup(net, frn);
portid = NETLINK_CB(skb).portid; /* netlink portid */
@@ -1425,7 +1425,7 @@ static void fib_disable_ip(struct net_device *dev, unsigned long event,
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct in_ifaddr *ifa = ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct net *net = dev_net(dev);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 001fea394bde..513f475c6a53 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -145,7 +145,7 @@ INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule,
int flags,
struct fib_lookup_arg *arg)
{
- struct fib_result *result = (struct fib_result *) arg->result;
+ struct fib_result *result = arg->result;
struct net_device *dev = NULL;
if (result->fi) {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ccb62038f6a4..a57ba23571c9 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -524,7 +524,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
fri.tb_id = tb_id;
fri.dst = key;
fri.dst_len = dst_len;
- fri.tos = inet_dscp_to_dsfield(fa->fa_dscp);
+ fri.dscp = fa->fa_dscp;
fri.type = fa->fa_type;
fri.offload = READ_ONCE(fa->offload);
fri.trap = READ_ONCE(fa->trap);
@@ -1781,7 +1781,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
rtm->rtm_family = AF_INET;
rtm->rtm_dst_len = fri->dst_len;
rtm->rtm_src_len = 0;
- rtm->rtm_tos = fri->tos;
+ rtm->rtm_tos = inet_dscp_to_dsfield(fri->dscp);
if (tb_id < 256)
rtm->rtm_table = tb_id;
else
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index fb0e49c36c2e..2734c3af7e24 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -82,7 +82,7 @@ static int call_fib_entry_notifier(struct notifier_block *nb,
.dst = dst,
.dst_len = dst_len,
.fi = fa->fa_info,
- .tos = inet_dscp_to_dsfield(fa->fa_dscp),
+ .dscp = fa->fa_dscp,
.type = fa->fa_type,
.tb_id = fa->tb_id,
};
@@ -99,7 +99,7 @@ static int call_fib_entry_notifiers(struct net *net,
.dst = dst,
.dst_len = dst_len,
.fi = fa->fa_info,
- .tos = inet_dscp_to_dsfield(fa->fa_dscp),
+ .dscp = fa->fa_dscp,
.type = fa->fa_type,
.tb_id = fa->tb_id,
};
@@ -1032,8 +1032,8 @@ fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
if (fa->fa_slen == slen && fa->tb_id == fri->tb_id &&
- fa->fa_dscp == inet_dsfield_to_dscp(fri->tos) &&
- fa->fa_info == fri->fi && fa->fa_type == fri->type)
+ fa->fa_dscp == fri->dscp && fa->fa_info == fri->fi &&
+ fa->fa_type == fri->type)
return fa;
}
@@ -2305,7 +2305,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
fri.tb_id = tb->tb_id;
fri.dst = xkey;
fri.dst_len = KEYLENGTH - fa->fa_slen;
- fri.tos = inet_dscp_to_dsfield(fa->fa_dscp);
+ fri.dscp = fa->fa_dscp;
fri.type = fa->fa_type;
fri.offload = READ_ONCE(fa->offload);
fri.trap = READ_ONCE(fa->trap);
@@ -2625,7 +2625,7 @@ static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
- struct net *net = (struct net *)seq->private;
+ struct net *net = seq->private;
unsigned int h;
seq_printf(seq,
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 0d085cc8d96c..025a33c1b04d 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -16,7 +16,6 @@
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
-#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 72a375c7f417..efea0e796f06 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(icmp_err_convert);
*/
struct icmp_control {
- bool (*handler)(struct sk_buff *skb);
+ enum skb_drop_reason (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
@@ -342,7 +342,7 @@ void icmp_out_count(struct net *net, unsigned char type)
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb)
{
- struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
+ struct icmp_bxm *icmp_param = from;
__wsum csum;
csum = skb_copy_and_csum_bits(icmp_param->skb,
@@ -839,8 +839,9 @@ static bool icmp_tag_validation(int proto)
* ICMP_PARAMETERPROB.
*/
-static bool icmp_unreach(struct sk_buff *skb)
+static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
{
+ enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
const struct iphdr *iph;
struct icmphdr *icmph;
struct net *net;
@@ -860,8 +861,10 @@ static bool icmp_unreach(struct sk_buff *skb)
icmph = icmp_hdr(skb);
iph = (const struct iphdr *)skb->data;
- if (iph->ihl < 5) /* Mangled header, drop. */
+ if (iph->ihl < 5) { /* Mangled header, drop. */
+ reason = SKB_DROP_REASON_IP_INHDR;
goto out_err;
+ }
switch (icmph->type) {
case ICMP_DEST_UNREACH:
@@ -941,10 +944,10 @@ static bool icmp_unreach(struct sk_buff *skb)
icmp_socket_deliver(skb, info);
out:
- return true;
+ return reason;
out_err:
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
- return false;
+ return reason ?: SKB_DROP_REASON_NOT_SPECIFIED;
}
@@ -952,20 +955,20 @@ out_err:
* Handle ICMP_REDIRECT.
*/
-static bool icmp_redirect(struct sk_buff *skb)
+static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
{
if (skb->len < sizeof(struct iphdr)) {
__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
- return false;
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
}
if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
/* there ought to be a stat */
- return false;
+ return SKB_DROP_REASON_NOMEM;
}
icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
- return true;
+ return SKB_NOT_DROPPED_YET;
}
/*
@@ -982,7 +985,7 @@ static bool icmp_redirect(struct sk_buff *skb)
* See also WRT handling of options once they are done and working.
*/
-static bool icmp_echo(struct sk_buff *skb)
+static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
{
struct icmp_bxm icmp_param;
struct net *net;
@@ -990,7 +993,7 @@ static bool icmp_echo(struct sk_buff *skb)
net = dev_net(skb_dst(skb)->dev);
/* should there be an ICMP stat for ignored echos? */
if (net->ipv4.sysctl_icmp_echo_ignore_all)
- return true;
+ return SKB_NOT_DROPPED_YET;
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.skb = skb;
@@ -1001,10 +1004,10 @@ static bool icmp_echo(struct sk_buff *skb)
if (icmp_param.data.icmph.type == ICMP_ECHO)
icmp_param.data.icmph.type = ICMP_ECHOREPLY;
else if (!icmp_build_probe(skb, &icmp_param.data.icmph))
- return true;
+ return SKB_NOT_DROPPED_YET;
icmp_reply(&icmp_param, skb);
- return true;
+ return SKB_NOT_DROPPED_YET;
}
/* Helper for icmp_echo and icmpv6_echo_reply.
@@ -1122,7 +1125,7 @@ EXPORT_SYMBOL_GPL(icmp_build_probe);
* MUST be accurate to a few minutes.
* MUST be updated at least at 15Hz.
*/
-static bool icmp_timestamp(struct sk_buff *skb)
+static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
{
struct icmp_bxm icmp_param;
/*
@@ -1147,17 +1150,17 @@ static bool icmp_timestamp(struct sk_buff *skb)
icmp_param.data_len = 0;
icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb);
- return true;
+ return SKB_NOT_DROPPED_YET;
out_err:
__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
- return false;
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
}
-static bool icmp_discard(struct sk_buff *skb)
+static enum skb_drop_reason icmp_discard(struct sk_buff *skb)
{
/* pretend it was a success */
- return true;
+ return SKB_NOT_DROPPED_YET;
}
/*
@@ -1165,18 +1168,20 @@ static bool icmp_discard(struct sk_buff *skb)
*/
int icmp_rcv(struct sk_buff *skb)
{
- struct icmphdr *icmph;
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
- bool success;
+ struct icmphdr *icmph;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
int nh;
if (!(sp && sp->xvec[sp->len - 1]->props.flags &
- XFRM_STATE_ICMP))
+ XFRM_STATE_ICMP)) {
+ reason = SKB_DROP_REASON_XFRM_POLICY;
goto drop;
+ }
if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
goto drop;
@@ -1184,8 +1189,11 @@ int icmp_rcv(struct sk_buff *skb)
nh = skb_network_offset(skb);
skb_set_network_header(skb, sizeof(*icmph));
- if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
+ if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN,
+ skb)) {
+ reason = SKB_DROP_REASON_XFRM_POLICY;
goto drop;
+ }
skb_set_network_header(skb, nh);
}
@@ -1207,13 +1215,13 @@ int icmp_rcv(struct sk_buff *skb)
/* We can't use icmp_pointers[].handler() because it is an array of
* size NR_ICMP_TYPES + 1 (19 elements) and PROBE has code 42.
*/
- success = icmp_echo(skb);
- goto success_check;
+ reason = icmp_echo(skb);
+ goto reason_check;
}
if (icmph->type == ICMP_EXT_ECHOREPLY) {
- success = ping_rcv(skb);
- goto success_check;
+ reason = ping_rcv(skb);
+ goto reason_check;
}
/*
@@ -1222,8 +1230,10 @@ int icmp_rcv(struct sk_buff *skb)
* RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently
* discarded.
*/
- if (icmph->type > NR_ICMP_TYPES)
+ if (icmph->type > NR_ICMP_TYPES) {
+ reason = SKB_DROP_REASON_UNHANDLED_PROTO;
goto error;
+ }
/*
* Parse the ICMP message
@@ -1239,27 +1249,30 @@ int icmp_rcv(struct sk_buff *skb)
if ((icmph->type == ICMP_ECHO ||
icmph->type == ICMP_TIMESTAMP) &&
net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
+ reason = SKB_DROP_REASON_INVALID_PROTO;
goto error;
}
if (icmph->type != ICMP_ECHO &&
icmph->type != ICMP_TIMESTAMP &&
icmph->type != ICMP_ADDRESS &&
icmph->type != ICMP_ADDRESSREPLY) {
+ reason = SKB_DROP_REASON_INVALID_PROTO;
goto error;
}
}
- success = icmp_pointers[icmph->type].handler(skb);
-success_check:
- if (success) {
+ reason = icmp_pointers[icmph->type].handler(skb);
+reason_check:
+ if (!reason) {
consume_skb(skb);
return NET_RX_SUCCESS;
}
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return NET_RX_DROP;
csum_error:
+ reason = SKB_DROP_REASON_ICMP_CSUM;
__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
error:
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
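/*
 * Sketch of the handler convention the icmp.c hunks above converge on
 * (illustrative only, not part of the patch).  It assumes, as icmp_rcv()
 * now does, that SKB_NOT_DROPPED_YET is the zero value of
 * enum skb_drop_reason, so "if (!reason)" means "consumed, not dropped".
 */
#include <linux/icmp.h>
#include <linux/netdevice.h>	/* NET_RX_SUCCESS / NET_RX_DROP */
#include <linux/skbuff.h>	/* enum skb_drop_reason, kfree_skb_reason() */

static enum skb_drop_reason example_icmp_handler(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
		return SKB_DROP_REASON_PKT_TOO_SMALL;
	/* ... process the message ... */
	return SKB_NOT_DROPPED_YET;
}

static int example_icmp_rcv(struct sk_buff *skb)
{
	enum skb_drop_reason reason = example_icmp_handler(skb);

	if (!reason) {
		consume_skb(skb);		/* delivered: free without tracing a drop */
		return NET_RX_SUCCESS;
	}
	kfree_skb_reason(skb, reason);		/* reason is visible in the kfree_skb tracepoint */
	return NET_RX_DROP;
}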
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1d9e6d5e9a76..b65d074d9620 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2839,7 +2839,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
seq_puts(seq,
"Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
else {
- struct ip_mc_list *im = (struct ip_mc_list *)v;
+ struct ip_mc_list *im = v;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
char *querier;
long delta;
@@ -2983,7 +2983,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
- struct ip_sf_list *psf = (struct ip_sf_list *)v;
+ struct ip_sf_list *psf = v;
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
if (v == SEQ_START_TOKEN) {
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 63948f6aeca0..c9f9ac5013a7 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -510,7 +510,7 @@ EXPORT_SYMBOL(inet_frag_reasm_prepare);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
void *reasm_data, bool try_coalesce)
{
- struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+ struct sk_buff **nextp = reasm_data;
struct rb_node *rbn;
struct sk_buff *fp;
int sum_truesize;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 92ba3350274b..e3aa436a1bdf 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -90,6 +90,7 @@ int ip_forward(struct sk_buff *skb)
struct rtable *rt; /* Route we use */
struct ip_options *opt = &(IPCB(skb)->opt);
struct net *net;
+ SKB_DR(reason);
/* that should never happen */
if (skb->pkt_type != PACKET_HOST)
@@ -101,8 +102,10 @@ int ip_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
- if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
+ SKB_DR_SET(reason, XFRM_POLICY);
goto drop;
+ }
if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
return NET_RX_SUCCESS;
@@ -118,8 +121,10 @@ int ip_forward(struct sk_buff *skb)
if (ip_hdr(skb)->ttl <= 1)
goto too_many_hops;
- if (!xfrm4_route_forward(skb))
+ if (!xfrm4_route_forward(skb)) {
+ SKB_DR_SET(reason, XFRM_POLICY);
goto drop;
+ }
rt = skb_rtable(skb);
@@ -132,6 +137,7 @@ int ip_forward(struct sk_buff *skb)
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
+ SKB_DR_SET(reason, PKT_TOO_BIG);
goto drop;
}
@@ -169,7 +175,8 @@ too_many_hops:
/* Tell the sender its packet died... */
__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
+ SKB_DR_SET(reason, IP_INHDR);
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return NET_RX_DROP;
}
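/*
 * The SKB_DR()/SKB_DR_SET() helpers used in the ip_forward() hunks above are
 * convenience macros for declaring and narrowing a drop reason.  Assumed
 * expansion, shown here with local names (the real definitions live next to
 * enum skb_drop_reason in the skbuff headers):
 */
#define EXAMPLE_SKB_DR_INIT(name, reason) \
	enum skb_drop_reason name = SKB_DROP_REASON_##reason
#define EXAMPLE_SKB_DR(name) \
	EXAMPLE_SKB_DR_INIT(name, NOT_SPECIFIED)
#define EXAMPLE_SKB_DR_SET(name, reason) \
	(name = SKB_DROP_REASON_##reason)
/*
 * ip_forward() thus starts from NOT_SPECIFIED and refines the reason at each
 * failing check before the single kfree_skb_reason() call at "drop:".
 */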
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index aacee9dd771b..7e474a85deaf 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -748,6 +748,7 @@ free_skb:
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ __be16 flags;
int len;
len = tunnel->tun_hlen;
@@ -763,19 +764,15 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
if (set_mtu)
dev->mtu = max_t(int, dev->mtu - len, 68);
- if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
- if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
- tunnel->encap.type == TUNNEL_ENCAP_NONE) {
- dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- } else {
- dev->features &= ~NETIF_F_GSO_SOFTWARE;
- dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
- }
- dev->features |= NETIF_F_LLTX;
- } else {
+ flags = tunnel->parms.o_flags;
+
+ if (flags & TUNNEL_SEQ ||
+ (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
+ dev->features &= ~NETIF_F_GSO_SOFTWARE;
dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
- dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
+ } else {
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
}
@@ -949,6 +946,7 @@ static void ipgre_tunnel_setup(struct net_device *dev)
static void __gre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel;
+ __be16 flags;
tunnel = netdev_priv(dev);
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
@@ -957,25 +955,21 @@ static void __gre_tunnel_init(struct net_device *dev)
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
- dev->features |= GRE_FEATURES;
+ dev->features |= GRE_FEATURES | NETIF_F_LLTX;
dev->hw_features |= GRE_FEATURES;
- if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
- /* TCP offload with GRE SEQ is not supported, nor
- * can we support 2 levels of outer headers requiring
- * an update.
- */
- if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
- (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
- dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- }
+ flags = tunnel->parms.o_flags;
- /* Can use a lockless transmit, unless we generate
- * output sequences
- */
- dev->features |= NETIF_F_LLTX;
- }
+ /* TCP offload with GRE SEQ is not supported, nor can we support 2
+ * levels of outer headers requiring an update.
+ */
+ if (flags & TUNNEL_SEQ)
+ return;
+ if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
+ return;
+
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
static int ipgre_tunnel_init(struct net_device *dev)
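/*
 * Sketch (local names, not from the patch) of the single predicate the two
 * ip_gre.c hunks above now express: software GSO stays off when the tunnel
 * generates output sequence numbers, or when it checksums an encapsulated
 * outer header.  NETIF_F_LLTX is set once in __gre_tunnel_init() rather than
 * being toggled again in ipgre_link_update().
 */
#include <net/ip_tunnels.h>	/* TUNNEL_SEQ, TUNNEL_CSUM, TUNNEL_ENCAP_NONE */

static bool example_gre_can_gso(__be16 o_flags, int encap_type)
{
	if (o_flags & TUNNEL_SEQ)
		return false;	/* TCP offload with GRE SEQ is not supported */
	if ((o_flags & TUNNEL_CSUM) && encap_type != TUNNEL_ENCAP_NONE)
		return false;	/* two levels of outer headers would need updating */
	return true;
}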
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 95f7bb052784..b1165f717cd1 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -451,6 +451,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
* that it receives, do not try to analyse it.
*/
if (skb->pkt_type == PACKET_OTHERHOST) {
+ dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
drop_reason = SKB_DROP_REASON_OTHERHOST;
goto drop;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c860519d57ee..13e6329784fb 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -356,7 +356,7 @@ static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct mfc_cache_cmp_arg *cmparg = arg->key;
- struct mfc_cache *c = (struct mfc_cache *)ptr;
+ const struct mfc_cache *c = ptr;
return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
cmparg->mfc_origin != c->mfc_origin;
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 4151eb1262dd..b75cac69bd7e 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -112,6 +112,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
fl4.daddr = iph->daddr;
fl4.saddr = get_saddr(iph->saddr);
} else {
+ if (nft_hook(pkt) == NF_INET_FORWARD &&
+ priv->flags & NFTA_FIB_F_IIF)
+ fl4.flowi4_iif = nft_out(pkt)->ifindex;
+
fl4.daddr = iph->saddr;
fl4.saddr = get_saddr(iph->daddr);
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 3ee947557b88..5f8cad2978b3 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(ping_err);
int ping_getfrag(void *from, char *to,
int offset, int fraglen, int odd, struct sk_buff *skb)
{
- struct pingfakehdr *pfh = (struct pingfakehdr *)from;
+ struct pingfakehdr *pfh = from;
if (offset == 0) {
fraglen -= sizeof(struct icmphdr);
@@ -844,8 +844,8 @@ do_confirm:
goto out;
}
-int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len)
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
@@ -861,7 +861,7 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
if (flags & MSG_ERRQUEUE)
return inet_recv_error(sk, msg, len, addr_len);
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@ -934,16 +934,24 @@ out:
}
EXPORT_SYMBOL_GPL(ping_recvmsg);
-int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk,
+ struct sk_buff *skb)
{
+ enum skb_drop_reason reason;
+
pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
inet_sk(sk), inet_sk(sk)->inet_num, skb);
- if (sock_queue_rcv_skb(sk, skb) < 0) {
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
+ kfree_skb_reason(skb, reason);
pr_debug("ping_queue_rcv_skb -> failed\n");
- return -1;
+ return reason;
}
- return 0;
+ return SKB_NOT_DROPPED_YET;
+}
+
+int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return __ping_queue_rcv_skb(sk, skb) ? -1 : 0;
}
EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
@@ -952,12 +960,12 @@ EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
* All we need to do is get the socket.
*/
-bool ping_rcv(struct sk_buff *skb)
+enum skb_drop_reason ping_rcv(struct sk_buff *skb)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NO_SOCKET;
struct sock *sk;
struct net *net = dev_net(skb->dev);
struct icmphdr *icmph = icmp_hdr(skb);
- bool rc = false;
/* We assume the packet has already been checked by icmp_rcv */
@@ -972,15 +980,17 @@ bool ping_rcv(struct sk_buff *skb)
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
pr_debug("rcv on socket %p\n", sk);
- if (skb2 && !ping_queue_rcv_skb(sk, skb2))
- rc = true;
+ if (skb2)
+ reason = __ping_queue_rcv_skb(sk, skb2);
+ else
+ reason = SKB_DROP_REASON_NOMEM;
sock_put(sk);
}
- if (!rc)
+ if (reason)
pr_debug("no socket, dropping\n");
- return rc;
+ return reason;
}
EXPORT_SYMBOL_GPL(ping_rcv);
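/*
 * The recvmsg() changes above (and in raw.c/tcp.c below) drop the separate
 * "noblock" argument: non-blocking behaviour now rides in flags as
 * MSG_DONTWAIT, and sock_rcvtimeo()/skb_recv_datagram() derive it from there.
 * Hypothetical caller, assuming the updated struct proto::recvmsg prototype:
 */
static int example_recv(struct sock *sk, struct msghdr *msg, size_t len,
			bool nonblocking, int *addr_len)
{
	int flags = nonblocking ? MSG_DONTWAIT : 0;

	return sk->sk_prot->recvmsg(sk, msg, len, flags, addr_len);
}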
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 9f97b9cbf7b3..bbd717805b10 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -753,7 +753,7 @@ out:
*/
static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
@@ -769,7 +769,7 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
goto out;
}
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@ -783,7 +783,7 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (err)
goto done;
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
/* Copy the address. */
if (sin) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 98c6f3429593..ffbe2e4f8c89 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -503,28 +503,29 @@ static void ip_rt_fix_tos(struct flowi4 *fl4)
__u8 tos = RT_FL_TOS(fl4);
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
- fl4->flowi4_scope = tos & RTO_ONLINK ?
- RT_SCOPE_LINK : RT_SCOPE_UNIVERSE;
+ if (tos & RTO_ONLINK)
+ fl4->flowi4_scope = RT_SCOPE_LINK;
}
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
- const struct sock *sk,
- const struct iphdr *iph,
- int oif, u8 tos,
- u8 prot, u32 mark, int flow_flags)
+ const struct sock *sk, const struct iphdr *iph,
+ int oif, __u8 tos, u8 prot, u32 mark,
+ int flow_flags)
{
+ __u8 scope = RT_SCOPE_UNIVERSE;
+
if (sk) {
const struct inet_sock *inet = inet_sk(sk);
oif = sk->sk_bound_dev_if;
mark = sk->sk_mark;
- tos = RT_CONN_FLAGS(sk);
+ tos = ip_sock_rt_tos(sk);
+ scope = ip_sock_rt_scope(sk);
prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
}
- flowi4_init_output(fl4, oif, mark, tos,
- RT_SCOPE_UNIVERSE, prot,
- flow_flags,
- iph->daddr, iph->saddr, 0, 0,
+
+ flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
+ prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
sock_net_uid(net, sk));
}
@@ -534,9 +535,9 @@ static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
const struct net *net = dev_net(skb->dev);
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
- u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
+ __u8 tos = iph->tos;
__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
@@ -552,7 +553,8 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
- RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+ ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
+ ip_sock_rt_scope(sk),
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
@@ -825,14 +827,13 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct net *net = dev_net(skb->dev);
int oif = skb->dev->ifindex;
- u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
+ __u8 tos = iph->tos;
rt = (struct rtable *) dst;
__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
- ip_rt_fix_tos(&fl4);
__ip_do_redirect(rt, skb, &fl4, true);
}
@@ -945,6 +946,7 @@ static int ip_error(struct sk_buff *skb)
struct inet_peer *peer;
unsigned long now;
struct net *net;
+ SKB_DR(reason);
bool send;
int code;
@@ -964,10 +966,12 @@ static int ip_error(struct sk_buff *skb)
if (!IN_DEV_FORWARD(in_dev)) {
switch (rt->dst.error) {
case EHOSTUNREACH:
+ SKB_DR_SET(reason, IP_INADDRERRORS);
__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
break;
case ENETUNREACH:
+ SKB_DR_SET(reason, IP_INNOROUTES);
__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
break;
}
@@ -983,6 +987,7 @@ static int ip_error(struct sk_buff *skb)
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
+ SKB_DR_SET(reason, IP_INNOROUTES);
__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
@@ -1009,7 +1014,7 @@ static int ip_error(struct sk_buff *skb)
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
-out: kfree_skb(skb);
+out: kfree_skb_reason(skb, reason);
return 0;
}
@@ -1057,7 +1062,6 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct flowi4 fl4;
ip_rt_build_flow_key(&fl4, sk, skb);
- ip_rt_fix_tos(&fl4);
/* Don't make lookup fail for bridged encapsulations */
if (skb && netif_is_any_bridge_port(skb->dev))
@@ -1074,8 +1078,8 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
struct rtable *rt;
u32 mark = IP4_REPLY_MARK(net, skb->mark);
- __build_flow_key(net, &fl4, NULL, iph, oif,
- RT_TOS(iph->tos), protocol, mark, 0);
+ __build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
+ 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
@@ -1132,8 +1136,6 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
goto out;
new = true;
- } else {
- ip_rt_fix_tos(&fl4);
}
__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
@@ -1165,8 +1167,7 @@ void ipv4_redirect(struct sk_buff *skb, struct net *net,
struct flowi4 fl4;
struct rtable *rt;
- __build_flow_key(net, &fl4, NULL, iph, oif,
- RT_TOS(iph->tos), protocol, 0, 0);
+ __build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
@@ -3394,7 +3395,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fri.tb_id = table_id;
fri.dst = res.prefix;
fri.dst_len = res.prefixlen;
- fri.tos = fl4.flowi4_tos;
+ fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
fri.type = rt->rt_type;
fri.offload = 0;
fri.trap = 0;
@@ -3407,7 +3408,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (fa->fa_slen == slen &&
fa->tb_id == fri.tb_id &&
- fa->fa_dscp == inet_dsfield_to_dscp(fri.tos) &&
+ fa->fa_dscp == fri.dscp &&
fa->fa_info == res.fi &&
fa->fa_type == fri.type) {
fri.offload = READ_ONCE(fa->offload);
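/*
 * Assumed shape of the helpers the route.c hunks above switch to (hedged;
 * see the routing headers for the real ip_sock_rt_tos()/ip_sock_rt_scope()).
 * Deriving tos and scope separately from the socket, instead of packing them
 * via RT_CONN_FLAGS()/RTO_ONLINK, is what allows the ip_rt_fix_tos() calls
 * above to be dropped: the scope is decided up front and only IPTOS_RT_MASK
 * bits of the tos reach the flow key.
 */
static inline __u8 example_sock_rt_tos(const struct sock *sk)
{
	return RT_TOS(inet_sk(sk)->tos);
}

static inline __u8 example_sock_rt_scope(const struct sock *sk)
{
	return sock_flag(sk, SOCK_LOCALROUTE) ? RT_SCOPE_LINK
					      : RT_SCOPE_UNIVERSE;
}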
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ad80d180b60b..cd448cdd3b38 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -20,10 +20,6 @@
#include <net/protocol.h>
#include <net/netevent.h>
-static int two = 2;
-static int three __maybe_unused = 3;
-static int four = 4;
-static int thousand = 1000;
static int tcp_retr1_max = 255;
static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -1006,7 +1002,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{
.procname = "tcp_max_syn_backlog",
@@ -1059,7 +1055,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_fib_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = &three,
+ .extra2 = SYSCTL_THREE,
},
{
.procname = "fib_multipath_hash_fields",
@@ -1117,7 +1113,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &four,
+ .extra2 = SYSCTL_FOUR,
},
{
.procname = "tcp_recovery",
@@ -1310,7 +1306,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &thousand,
+ .extra2 = SYSCTL_ONE_THOUSAND,
},
{
.procname = "tcp_pacing_ca_ratio",
@@ -1319,7 +1315,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &thousand,
+ .extra2 = SYSCTL_ONE_THOUSAND,
},
{
.procname = "tcp_wmem",
@@ -1391,7 +1387,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{ }
};
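/*
 * The SYSCTL_TWO/SYSCTL_THREE/SYSCTL_FOUR/SYSCTL_ONE_THOUSAND bounds
 * substituted above are shared, kernel-wide constants (roughly: pointers
 * into a const array exported by <linux/sysctl.h>), so ipv4 no longer needs
 * writable "static int two;" style limits of its own.  Rough sketch of the
 * idea with local names:
 */
static const int example_sysctl_vals[] = { 0, 1, 2, 3, 4, 1000 };
#define EXAMPLE_SYSCTL_TWO		((void *)&example_sysctl_vals[2])
#define EXAMPLE_SYSCTL_ONE_THOUSAND	((void *)&example_sysctl_vals[5])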
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf18fbcbf123..b44fde435bd1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -429,7 +429,7 @@ void tcp_init_sock(struct sock *sk)
* algorithms that we must have the following bandaid to talk
* efficiently to them. -DaveM
*/
- tp->snd_cwnd = TCP_INIT_CWND;
+ tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
/* There's a bubble in the pipe until at least the first ACK. */
tp->app_limited = ~0U;
@@ -843,7 +843,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
}
release_sock(sk);
- sk_defer_free_flush(sk);
if (spliced)
return spliced;
@@ -1589,20 +1588,6 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
tcp_send_ack(sk);
}
-void __sk_defer_free_flush(struct sock *sk)
-{
- struct llist_node *head;
- struct sk_buff *skb, *n;
-
- head = llist_del_all(&sk->defer_list);
- llist_for_each_entry_safe(skb, n, head, ll_node) {
- prefetch(n);
- skb_mark_not_on_list(skb);
- __kfree_skb(skb);
- }
-}
-EXPORT_SYMBOL(__sk_defer_free_flush);
-
static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
@@ -1610,11 +1595,7 @@ static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
sock_rfree(skb);
skb->destructor = NULL;
skb->sk = NULL;
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- !llist_empty(&sk->defer_list)) {
- llist_add(&skb->ll_node, &sk->defer_list);
- return;
- }
+ return skb_attempt_defer_free(skb);
}
__kfree_skb(skb);
}
@@ -1877,8 +1858,7 @@ static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
}
static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags,
- struct scm_timestamping_internal *tss,
+ int flags, struct scm_timestamping_internal *tss,
int *cmsg_flags);
static int receive_fallback_to_copy(struct sock *sk,
struct tcp_zerocopy_receive *zc, int inq,
@@ -1900,7 +1880,7 @@ static int receive_fallback_to_copy(struct sock *sk,
if (err)
return err;
- err = tcp_recvmsg_locked(sk, &msg, inq, /*nonblock=*/1, /*flags=*/0,
+ err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT,
tss, &zc->msg_flags);
if (err < 0)
return err;
@@ -2316,8 +2296,7 @@ static int tcp_inq_hint(struct sock *sk)
*/
static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags,
- struct scm_timestamping_internal *tss,
+ int flags, struct scm_timestamping_internal *tss,
int *cmsg_flags)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2335,9 +2314,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
if (sk->sk_state == TCP_LISTEN)
goto out;
- if (tp->recvmsg_inq)
+ if (tp->recvmsg_inq) {
*cmsg_flags = TCP_CMSG_INQ;
- timeo = sock_rcvtimeo(sk, nonblock);
+ msg->msg_get_inq = 1;
+ }
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
/* Urgent data needs to be handled specially. */
if (flags & MSG_OOB)
@@ -2455,7 +2436,6 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
__sk_flush_backlog(sk);
} else {
tcp_cleanup_rbuf(sk, copied);
- sk_defer_free_flush(sk);
sk_wait_data(sk, &timeo, last);
}
@@ -2556,10 +2536,10 @@ recv_sndq:
goto out;
}
-int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
- int flags, int *addr_len)
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len)
{
- int cmsg_flags = 0, ret, inq;
+ int cmsg_flags = 0, ret;
struct scm_timestamping_internal tss;
if (unlikely(flags & MSG_ERRQUEUE))
@@ -2568,20 +2548,20 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
if (sk_can_busy_loop(sk) &&
skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
- sk_busy_loop(sk, nonblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
lock_sock(sk);
- ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss,
- &cmsg_flags);
+ ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
release_sock(sk);
- sk_defer_free_flush(sk);
- if (cmsg_flags && ret >= 0) {
+ if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
if (cmsg_flags & TCP_CMSG_TS)
tcp_recv_timestamp(msg, sk, &tss);
- if (cmsg_flags & TCP_CMSG_INQ) {
- inq = tcp_inq_hint(sk);
- put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
+ if (msg->msg_get_inq) {
+ msg->msg_inq = tcp_inq_hint(sk);
+ if (cmsg_flags & TCP_CMSG_INQ)
+ put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
+ sizeof(msg->msg_inq), &msg->msg_inq);
}
}
return ret;
@@ -3033,7 +3013,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_rto_min = TCP_RTO_MIN;
icsk->icsk_delack_max = TCP_DELACK_MAX;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- tp->snd_cwnd = TCP_INIT_CWND;
+ tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
tp->delivered = 0;
@@ -3099,7 +3079,6 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_frag.page = NULL;
sk->sk_frag.offset = 0;
}
- sk_defer_free_flush(sk);
sk_error_report(sk);
return 0;
}
@@ -3744,7 +3723,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_max_pacing_rate = rate64;
info->tcpi_reordering = tp->reordering;
- info->tcpi_snd_cwnd = tp->snd_cwnd;
+ info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
if (info->tcpi_state == TCP_LISTEN) {
/* listeners aliased fields :
@@ -3915,7 +3894,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
rate64 = tcp_compute_delivery_rate(tp);
nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
- nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
+ nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
@@ -4228,7 +4207,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
&zc, &len, err);
release_sock(sk);
- sk_defer_free_flush(sk);
if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
goto zerocopy_rcv_cmsg;
switch (len) {
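/*
 * The tcp_snd_cwnd()/tcp_snd_cwnd_set() conversions in this file and in the
 * congestion-control modules below wrap every access to tp->snd_cwnd.
 * Assumed shape of the accessors (see include/net/tcp.h for the real ones);
 * the setter is the natural place to catch writes of an invalid zero cwnd.
 */
static inline u32 example_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void example_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE(!val);	/* cwnd must never be zero */
	tp->snd_cwnd = val;
}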
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 02e8626ccb27..c7d30a3bbd81 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -276,7 +276,7 @@ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
} else { /* no RTT sample yet */
rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
}
- bw = (u64)tp->snd_cwnd * BW_UNIT;
+ bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
do_div(bw, rtt_us);
sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}
@@ -323,9 +323,9 @@ static void bbr_save_cwnd(struct sock *sk)
struct bbr *bbr = inet_csk_ca(sk);
if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
- bbr->prior_cwnd = tp->snd_cwnd; /* this cwnd is good enough */
+ bbr->prior_cwnd = tcp_snd_cwnd(tp); /* this cwnd is good enough */
else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
- bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
+ bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
}
static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
@@ -482,7 +482,7 @@ static bool bbr_set_cwnd_to_recover_or_restore(
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
- u32 cwnd = tp->snd_cwnd;
+ u32 cwnd = tcp_snd_cwnd(tp);
/* An ACK for P pkts should release at most 2*P packets. We do this
* in two steps. First, here we deduct the number of lost packets.
@@ -520,7 +520,7 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
+ u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;
if (!acked)
goto done; /* no packet fully ACKed; just apply caps */
@@ -544,9 +544,9 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
cwnd = max(cwnd, bbr_cwnd_min_target);
done:
- tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); /* apply global cap */
+ tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* apply global cap */
if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */
- tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
+ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target));
}
/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
@@ -856,7 +856,7 @@ static void bbr_update_ack_aggregation(struct sock *sk,
bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
bbr->ack_epoch_acked + rs->acked_sacked);
extra_acked = bbr->ack_epoch_acked - expected_acked;
- extra_acked = min(extra_acked, tp->snd_cwnd);
+ extra_acked = min(extra_acked, tcp_snd_cwnd(tp));
if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
}
@@ -914,7 +914,7 @@ static void bbr_check_probe_rtt_done(struct sock *sk)
return;
bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
- tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+ tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));
bbr_reset_mode(sk);
}
@@ -1093,7 +1093,7 @@ static u32 bbr_undo_cwnd(struct sock *sk)
bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */
bbr->full_bw_cnt = 0;
bbr_reset_lt_bw_sampling(sk);
- return tcp_sk(sk)->snd_cwnd;
+ return tcp_snd_cwnd(tcp_sk(sk));
}
/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index f5f588b1f6e9..58358bf92e1b 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!acked)
return;
}
- bictcp_update(ca, tp->snd_cwnd);
+ bictcp_update(ca, tcp_snd_cwnd(tp));
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
@@ -166,16 +166,16 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
ca->epoch_start = 0; /* end of epoch */
/* Wmax and fast convergence */
- if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
- ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
+ if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
+ ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
/ (2 * BICTCP_BETA_SCALE);
else
- ca->last_max_cwnd = tp->snd_cwnd;
+ ca->last_max_cwnd = tcp_snd_cwnd(tp);
- if (tp->snd_cwnd <= low_window)
- return max(tp->snd_cwnd >> 1U, 2U);
+ if (tcp_snd_cwnd(tp) <= low_window)
+ return max(tcp_snd_cwnd(tp) >> 1U, 2U);
else
- return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
+ return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}
static void bictcp_state(struct sock *sk, u8 new_state)
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 1cdcb4df0eb7..be3947e70fec 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -174,7 +174,6 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
static int tcp_bpf_recvmsg_parser(struct sock *sk,
struct msghdr *msg,
size_t len,
- int nonblock,
int flags,
int *addr_len)
{
@@ -186,7 +185,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
psock = sk_psock_get(sk);
if (unlikely(!psock))
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ return tcp_recvmsg(sk, msg, len, flags, addr_len);
lock_sock(sk);
msg_bytes_ready:
@@ -211,7 +210,7 @@ msg_bytes_ready:
goto out;
}
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
if (!timeo) {
copied = -EAGAIN;
goto out;
@@ -234,7 +233,7 @@ out:
}
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct sk_psock *psock;
int copied, ret;
@@ -244,11 +243,11 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
psock = sk_psock_get(sk);
if (unlikely(!psock))
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ return tcp_recvmsg(sk, msg, len, flags, addr_len);
if (!skb_queue_empty(&sk->sk_receive_queue) &&
sk_psock_queue_empty(psock)) {
sk_psock_put(sk, psock);
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ return tcp_recvmsg(sk, msg, len, flags, addr_len);
}
lock_sock(sk);
msg_bytes_ready:
@@ -257,14 +256,14 @@ msg_bytes_ready:
long timeo;
int data;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = tcp_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
release_sock(sk);
sk_psock_put(sk, psock);
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ return tcp_recvmsg(sk, msg, len, flags, addr_len);
}
copied = -EAGAIN;
}
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 709d23801823..ddc7ba0554bd 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -161,8 +161,8 @@ static void tcp_cdg_hystart_update(struct sock *sk)
LINUX_MIB_TCPHYSTARTTRAINDETECT);
NET_ADD_STATS(sock_net(sk),
LINUX_MIB_TCPHYSTARTTRAINCWND,
- tp->snd_cwnd);
- tp->snd_ssthresh = tp->snd_cwnd;
+ tcp_snd_cwnd(tp));
+ tp->snd_ssthresh = tcp_snd_cwnd(tp);
return;
}
}
@@ -180,8 +180,8 @@ static void tcp_cdg_hystart_update(struct sock *sk)
LINUX_MIB_TCPHYSTARTDELAYDETECT);
NET_ADD_STATS(sock_net(sk),
LINUX_MIB_TCPHYSTARTDELAYCWND,
- tp->snd_cwnd);
- tp->snd_ssthresh = tp->snd_cwnd;
+ tcp_snd_cwnd(tp));
+ tp->snd_ssthresh = tcp_snd_cwnd(tp);
}
}
}
@@ -252,7 +252,7 @@ static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
return false;
}
- ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
+ ca->shadow_wnd = max(ca->shadow_wnd, tcp_snd_cwnd(tp));
ca->state = CDG_BACKOFF;
tcp_enter_cwr(sk);
return true;
@@ -285,14 +285,14 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
}
if (!tcp_is_cwnd_limited(sk)) {
- ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
+ ca->shadow_wnd = min(ca->shadow_wnd, tcp_snd_cwnd(tp));
return;
}
- prior_snd_cwnd = tp->snd_cwnd;
+ prior_snd_cwnd = tcp_snd_cwnd(tp);
tcp_reno_cong_avoid(sk, ack, acked);
- incr = tp->snd_cwnd - prior_snd_cwnd;
+ incr = tcp_snd_cwnd(tp) - prior_snd_cwnd;
ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
}
@@ -331,15 +331,15 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (ca->state == CDG_BACKOFF)
- return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);
+ return max(2U, (tcp_snd_cwnd(tp) * min(1024U, backoff_beta)) >> 10);
if (ca->state == CDG_NONFULL && use_tolerance)
- return tp->snd_cwnd;
+ return tcp_snd_cwnd(tp);
- ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd);
+ ca->shadow_wnd = min(ca->shadow_wnd >> 1, tcp_snd_cwnd(tp));
if (use_shadow)
- return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
- return max(2U, tp->snd_cwnd >> 1);
+ return max3(2U, ca->shadow_wnd, tcp_snd_cwnd(tp) >> 1);
+ return max(2U, tcp_snd_cwnd(tp) >> 1);
}
static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
@@ -357,7 +357,7 @@ static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
ca->gradients = gradients;
ca->rtt_seq = tp->snd_nxt;
- ca->shadow_wnd = tp->snd_cwnd;
+ ca->shadow_wnd = tcp_snd_cwnd(tp);
break;
case CA_EVENT_COMPLETE_CWR:
ca->state = CDG_UNKNOWN;
@@ -380,7 +380,7 @@ static void tcp_cdg_init(struct sock *sk)
ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
GFP_NOWAIT | __GFP_NOWARN);
ca->rtt_seq = tp->snd_nxt;
- ca->shadow_wnd = tp->snd_cwnd;
+ ca->shadow_wnd = tcp_snd_cwnd(tp);
}
static void tcp_cdg_release(struct sock *sk)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index dc95572163df..d3cae40749e8 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -16,6 +16,7 @@
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>
+#include <trace/events/tcp.h>
static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);
@@ -33,6 +34,17 @@ struct tcp_congestion_ops *tcp_ca_find(const char *name)
return NULL;
}
+void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ trace_tcp_cong_state_set(sk, ca_state);
+
+ if (icsk->icsk_ca_ops->set_state)
+ icsk->icsk_ca_ops->set_state(sk, ca_state);
+ icsk->icsk_ca_state = ca_state;
+}
+
/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
const char *name)
@@ -393,10 +405,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
*/
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
- u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
+ u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);
- acked -= cwnd - tp->snd_cwnd;
- tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+ acked -= cwnd - tcp_snd_cwnd(tp);
+ tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));
return acked;
}
@@ -410,7 +422,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
/* If credits accumulated at a higher w, apply them gently now. */
if (tp->snd_cwnd_cnt >= w) {
tp->snd_cwnd_cnt = 0;
- tp->snd_cwnd++;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
}
tp->snd_cwnd_cnt += acked;
@@ -418,9 +430,9 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
u32 delta = tp->snd_cwnd_cnt / w;
tp->snd_cwnd_cnt -= delta * w;
- tp->snd_cwnd += delta;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
}
- tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
+ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
@@ -445,7 +457,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
}
/* In dangerous area, increase slowly. */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+ tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
@@ -454,7 +466,7 @@ u32 tcp_reno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- return max(tp->snd_cwnd >> 1U, 2U);
+ return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
@@ -462,7 +474,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- return max(tp->snd_cwnd, tp->prior_cwnd);
+ return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 24d562dd6225..b0918839bee7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -334,7 +334,7 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!acked)
return;
}
- bictcp_update(ca, tp->snd_cwnd, acked);
+ bictcp_update(ca, tcp_snd_cwnd(tp), acked);
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
@@ -346,13 +346,13 @@ static u32 cubictcp_recalc_ssthresh(struct sock *sk)
ca->epoch_start = 0; /* end of epoch */
/* Wmax and fast convergence */
- if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
- ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
+ if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
+ ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
/ (2 * BICTCP_BETA_SCALE);
else
- ca->last_max_cwnd = tp->snd_cwnd;
+ ca->last_max_cwnd = tcp_snd_cwnd(tp);
- return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
+ return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}
static void cubictcp_state(struct sock *sk, u8 new_state)
@@ -413,13 +413,13 @@ static void hystart_update(struct sock *sk, u32 delay)
ca->found = 1;
pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
now - ca->round_start, threshold,
- ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
+ ca->delay_min, hystart_ack_delay(sk), tcp_snd_cwnd(tp));
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPHYSTARTTRAINDETECT);
NET_ADD_STATS(sock_net(sk),
LINUX_MIB_TCPHYSTARTTRAINCWND,
- tp->snd_cwnd);
- tp->snd_ssthresh = tp->snd_cwnd;
+ tcp_snd_cwnd(tp));
+ tp->snd_ssthresh = tcp_snd_cwnd(tp);
}
}
}
@@ -438,8 +438,8 @@ static void hystart_update(struct sock *sk, u32 delay)
LINUX_MIB_TCPHYSTARTDELAYDETECT);
NET_ADD_STATS(sock_net(sk),
LINUX_MIB_TCPHYSTARTDELAYCWND,
- tp->snd_cwnd);
- tp->snd_ssthresh = tp->snd_cwnd;
+ tcp_snd_cwnd(tp));
+ tp->snd_ssthresh = tcp_snd_cwnd(tp);
}
}
}
@@ -469,7 +469,7 @@ static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
/* hystart triggers when cwnd is larger than some threshold */
if (!ca->found && tcp_in_slow_start(tp) && hystart &&
- tp->snd_cwnd >= hystart_low_window)
+ tcp_snd_cwnd(tp) >= hystart_low_window)
hystart_update(sk, delay);
}
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 1943a6630341..ab034a4e9324 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -106,8 +106,8 @@ static u32 dctcp_ssthresh(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- ca->loss_cwnd = tp->snd_cwnd;
- return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
+ ca->loss_cwnd = tcp_snd_cwnd(tp);
+ return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U);
}
static void dctcp_update_alpha(struct sock *sk, u32 flags)
@@ -148,8 +148,8 @@ static void dctcp_react_to_loss(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- ca->loss_cwnd = tp->snd_cwnd;
- tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+ ca->loss_cwnd = tcp_snd_cwnd(tp);
+ tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
static void dctcp_state(struct sock *sk, u8 new_state)
@@ -211,8 +211,9 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
static u32 dctcp_cwnd_undo(struct sock *sk)
{
const struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
- return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+ return max(tcp_snd_cwnd(tp), ca->loss_cwnd);
}
static struct tcp_congestion_ops dctcp __read_mostly = {
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 349069d6cd0a..c6de5ce79ad3 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -127,22 +127,22 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
* snd_cwnd <=
* hstcp_aimd_vals[ca->ai].cwnd
*/
- if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
- while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
+ if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) {
+ while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd &&
ca->ai < HSTCP_AIMD_MAX - 1)
ca->ai++;
- } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
- while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
+ } else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) {
+ while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd)
ca->ai--;
}
/* Do additive increase */
- if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
+ if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
/* cwnd = cwnd + a(w) / cwnd */
tp->snd_cwnd_cnt += ca->ai + 1;
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- tp->snd_cwnd_cnt -= tp->snd_cwnd;
- tp->snd_cwnd++;
+ if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
+ tp->snd_cwnd_cnt -= tcp_snd_cwnd(tp);
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
}
}
}
@@ -154,7 +154,7 @@ static u32 hstcp_ssthresh(struct sock *sk)
struct hstcp *ca = inet_csk_ca(sk);
/* Do multiplicative decrease */
- return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
+ return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
}
static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 55adcfcf96fe..52b1f2665dfa 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -124,7 +124,7 @@ static void measure_achieved_throughput(struct sock *sk,
ca->packetcount += sample->pkts_acked;
- if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
+ if (ca->packetcount >= tcp_snd_cwnd(tp) - (ca->alpha >> 7 ? : 1) &&
now - ca->lasttime >= ca->minRTT &&
ca->minRTT > 0) {
__u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
const struct htcp *ca = inet_csk_ca(sk);
htcp_param_update(sk);
- return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
+ return max((tcp_snd_cwnd(tp) * ca->beta) >> 7, 2U);
}
static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
@@ -242,9 +242,9 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
/* In dangerous area, increase slowly.
* In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
*/
- if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
+ if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tcp_snd_cwnd(tp)) {
+ if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp)
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
tp->snd_cwnd_cnt = 0;
htcp_alpha_update(ca);
} else
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index be39327e04e6..abd7d91807e5 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -54,7 +54,7 @@ static void hybla_init(struct sock *sk)
ca->rho2_7ls = 0;
ca->snd_cwnd_cents = 0;
ca->hybla_en = true;
- tp->snd_cwnd = 2;
+ tcp_snd_cwnd_set(tp, 2);
tp->snd_cwnd_clamp = 65535;
/* 1st Rho measurement based on initial srtt */
@@ -62,7 +62,7 @@ static void hybla_init(struct sock *sk)
/* set minimum rtt as this is the 1st ever seen */
ca->minrtt_us = tp->srtt_us;
- tp->snd_cwnd = ca->rho;
+ tcp_snd_cwnd_set(tp, ca->rho);
}
static void hybla_state(struct sock *sk, u8 ca_state)
@@ -137,31 +137,31 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
* as long as increment is estimated as (rho<<7)/window
* it already is <<7 and we can easily count its fractions.
*/
- increment = ca->rho2_7ls / tp->snd_cwnd;
+ increment = ca->rho2_7ls / tcp_snd_cwnd(tp);
if (increment < 128)
tp->snd_cwnd_cnt++;
}
odd = increment % 128;
- tp->snd_cwnd += increment >> 7;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7));
ca->snd_cwnd_cents += odd;
/* check when fractions goes >=128 and increase cwnd by 1. */
while (ca->snd_cwnd_cents >= 128) {
- tp->snd_cwnd++;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
ca->snd_cwnd_cents -= 128;
tp->snd_cwnd_cnt = 0;
}
/* check when cwnd has not been incremented for a while */
- if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- tp->snd_cwnd++;
+ if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
tp->snd_cwnd_cnt = 0;
}
/* clamp down slowstart cwnd to ssthresh value. */
if (is_slowstart)
- tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh));
- tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
+ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
static struct tcp_congestion_ops tcp_hybla __read_mostly = {
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 00e54873213e..c0c81a2c77fa 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -224,7 +224,7 @@ static void update_params(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk);
- if (tp->snd_cwnd < win_thresh) {
+ if (tcp_snd_cwnd(tp) < win_thresh) {
ca->alpha = ALPHA_BASE;
ca->beta = BETA_BASE;
} else if (ca->cnt_rtt > 0) {
@@ -284,9 +284,9 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
* tp->snd_cwnd += alpha/tp->snd_cwnd
*/
delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
- if (delta >= tp->snd_cwnd) {
- tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
- (u32)tp->snd_cwnd_clamp);
+ if (delta >= tcp_snd_cwnd(tp)) {
+ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp) + delta / tcp_snd_cwnd(tp),
+ (u32)tp->snd_cwnd_clamp));
tp->snd_cwnd_cnt = 0;
}
}
@@ -296,9 +296,11 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk);
+ u32 decr;
/* Multiplicative decrease */
- return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
+ decr = (tcp_snd_cwnd(tp) * ca->beta) >> BETA_SHIFT;
+ return max(tcp_snd_cwnd(tp) - decr, 2U);
}
/* Extract info for Tcp socket info provided via netlink. */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 60f99e9fb6d1..97cfcd85f84e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -414,7 +414,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
per_mss = roundup_pow_of_two(per_mss) +
SKB_DATA_ALIGN(sizeof(struct sk_buff));
- nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+ nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
/* Fast Recovery (RFC 5681 3.2) :
@@ -909,12 +909,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
* If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
* end of slow start and should slow down.
*/
- if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+ if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
else
rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
- rate *= max(tp->snd_cwnd, tp->packets_out);
+ rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
if (likely(tp->srtt_us))
do_div(rate, tp->srtt_us);
@@ -2147,12 +2147,12 @@ void tcp_enter_loss(struct sock *sk)
!after(tp->high_seq, tp->snd_una) ||
(icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
tp->prior_ssthresh = tcp_current_ssthresh(sk);
- tp->prior_cwnd = tp->snd_cwnd;
+ tp->prior_cwnd = tcp_snd_cwnd(tp);
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
tcp_init_undo(tp);
}
- tp->snd_cwnd = tcp_packets_in_flight(tp) + 1;
+ tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_jiffies32;
@@ -2458,7 +2458,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
msg,
&inet->inet_daddr, ntohs(inet->inet_dport),
- tp->snd_cwnd, tcp_left_out(tp),
+ tcp_snd_cwnd(tp), tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
}
@@ -2467,7 +2467,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
msg,
&sk->sk_v6_daddr, ntohs(inet->inet_dport),
- tp->snd_cwnd, tcp_left_out(tp),
+ tcp_snd_cwnd(tp), tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
}
@@ -2492,7 +2492,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
if (tp->prior_ssthresh) {
const struct inet_connection_sock *icsk = inet_csk(sk);
- tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
+ tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
if (tp->prior_ssthresh > tp->snd_ssthresh) {
tp->snd_ssthresh = tp->prior_ssthresh;
@@ -2599,7 +2599,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
tp->high_seq = tp->snd_nxt;
tp->tlp_high_seq = 0;
tp->snd_cwnd_cnt = 0;
- tp->prior_cwnd = tp->snd_cwnd;
+ tp->prior_cwnd = tcp_snd_cwnd(tp);
tp->prr_delivered = 0;
tp->prr_out = 0;
tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
@@ -2629,7 +2629,7 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost,
}
/* Force a fast retransmit upon entering fast recovery */
sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
- tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+ tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
}
static inline void tcp_end_cwnd_reduction(struct sock *sk)
@@ -2642,7 +2642,7 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
(inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
- tp->snd_cwnd = tp->snd_ssthresh;
+ tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
tp->snd_cwnd_stamp = tcp_jiffies32;
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
@@ -2709,9 +2709,9 @@ static void tcp_mtup_probe_success(struct sock *sk)
/* FIXME: breaks with very large cwnd */
tp->prior_ssthresh = tcp_current_ssthresh(sk);
- tp->snd_cwnd = tp->snd_cwnd *
- tcp_mss_to_mtu(sk, tp->mss_cache) /
- icsk->icsk_mtup.probe_size;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) *
+ tcp_mss_to_mtu(sk, tp->mss_cache) /
+ icsk->icsk_mtup.probe_size);
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_jiffies32;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
@@ -3034,7 +3034,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->snd_una == tp->mtu_probe.probe_seq_start) {
tcp_mtup_probe_failed(sk);
/* Restores the reduction we did in tcp_mtup_probe() */
- tp->snd_cwnd++;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
tcp_simple_retransmit(sk);
return;
}
@@ -3766,7 +3766,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una - tp->max_window)) {
if (!(flag & FLAG_NO_CHALLENGE_ACK))
tcp_send_challenge_ack(sk);
- return -1;
+ return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
}
goto old_ack;
}
@@ -3775,7 +3775,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
* this segment (RFC793 Section 3.9).
*/
if (after(ack, tp->snd_nxt))
- return -1;
+ return -SKB_DROP_REASON_TCP_ACK_UNSENT_DATA;
if (after(ack, prior_snd_una)) {
flag |= FLAG_SND_UNA_ADVANCED;
@@ -4675,7 +4675,7 @@ static bool tcp_ooo_try_coalesce(struct sock *sk,
{
bool res = tcp_try_coalesce(sk, to, from, fragstolen);
- /* In case tcp_drop() is called later, update to->gso_segs */
+ /* In case tcp_drop_reason() is called later, update to->gso_segs */
if (res) {
u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
max_t(u16, 1, skb_shinfo(from)->gso_segs);
@@ -4692,11 +4692,6 @@ static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb,
kfree_skb_reason(skb, reason);
}
-static void tcp_drop(struct sock *sk, struct sk_buff *skb)
-{
- tcp_drop_reason(sk, skb, SKB_DROP_REASON_NOT_SPECIFIED);
-}
-
/* This one checks to see if we can put data from the
* out_of_order queue into the receive_queue.
*/
@@ -4724,7 +4719,7 @@ static void tcp_ofo_queue(struct sock *sk)
rb_erase(&skb->rbnode, &tp->out_of_order_queue);
if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
- tcp_drop(sk, skb);
+ tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_DROP);
continue;
}
@@ -5335,7 +5330,8 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
goal -= rb_to_skb(node)->truesize;
- tcp_drop(sk, rb_to_skb(node));
+ tcp_drop_reason(sk, rb_to_skb(node),
+ SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
if (!prev || goal <= 0) {
sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -5437,7 +5433,7 @@ static bool tcp_should_expand_sndbuf(struct sock *sk)
return false;
/* If we filled the congestion window, do not expand. */
- if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+ if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp))
return false;
return true;
@@ -5678,7 +5674,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, int syn_inerr)
{
struct tcp_sock *tp = tcp_sk(sk);
- bool rst_seq_match = false;
+ SKB_DR(reason);
/* RFC1323: H1. Apply PAWS check first. */
if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
@@ -5690,6 +5686,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
LINUX_MIB_TCPACKSKIPPEDPAWS,
&tp->last_oow_ack_time))
tcp_send_dupack(sk, skb);
+ SKB_DR_SET(reason, TCP_RFC7323_PAWS);
goto discard;
}
/* Reset is accepted even if it did not pass PAWS. */
@@ -5711,8 +5708,9 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
&tp->last_oow_ack_time))
tcp_send_dupack(sk, skb);
} else if (tcp_reset_check(sk, skb)) {
- tcp_reset(sk, skb);
+ goto reset;
}
+ SKB_DR_SET(reason, TCP_INVALID_SEQUENCE);
goto discard;
}
@@ -5728,9 +5726,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
* Send a challenge ACK
*/
if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
- tcp_reset_check(sk, skb)) {
- rst_seq_match = true;
- } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
+ tcp_reset_check(sk, skb))
+ goto reset;
+
+ if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
struct tcp_sack_block *sp = &tp->selective_acks[0];
int max_sack = sp[0].end_seq;
int this_sack;
@@ -5743,21 +5742,18 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
}
if (TCP_SKB_CB(skb)->seq == max_sack)
- rst_seq_match = true;
+ goto reset;
}
- if (rst_seq_match)
- tcp_reset(sk, skb);
- else {
- /* Disable TFO if RST is out-of-order
- * and no data has been received
- * for current active TFO socket
- */
- if (tp->syn_fastopen && !tp->data_segs_in &&
- sk->sk_state == TCP_ESTABLISHED)
- tcp_fastopen_active_disable(sk);
- tcp_send_challenge_ack(sk);
- }
+ /* Disable TFO if RST is out-of-order
+ * and no data has been received
+ * for current active TFO socket
+ */
+ if (tp->syn_fastopen && !tp->data_segs_in &&
+ sk->sk_state == TCP_ESTABLISHED)
+ tcp_fastopen_active_disable(sk);
+ tcp_send_challenge_ack(sk);
+ SKB_DR_SET(reason, TCP_RESET);
goto discard;
}
@@ -5772,6 +5768,7 @@ syn_challenge:
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
tcp_send_challenge_ack(sk);
+ SKB_DR_SET(reason, TCP_INVALID_SYN);
goto discard;
}
@@ -5780,7 +5777,12 @@ syn_challenge:
return true;
discard:
- tcp_drop(sk, skb);
+ tcp_drop_reason(sk, skb, reason);
+ return false;
+
+reset:
+ tcp_reset(sk, skb);
+ __kfree_skb(skb);
return false;
}
@@ -5926,6 +5928,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
+ skb_dst_drop(skb);
__skb_pull(skb, tcp_header_len);
eaten = tcp_queue_rcv(sk, skb, &fragstolen);
@@ -5967,9 +5970,11 @@ slow_path:
return;
step5:
- if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
+ reason = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT);
+ if ((int)reason < 0) {
+ reason = -reason;
goto discard;
-
+ }
tcp_rcv_rtt_measure_ts(sk, skb);
/* Process urgent data. */
@@ -6009,9 +6014,9 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
* retransmission has occurred.
*/
if (tp->total_retrans > 1 && tp->undo_marker)
- tp->snd_cwnd = 1;
+ tcp_snd_cwnd_set(tp, 1);
else
- tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
+ tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk)));
tp->snd_cwnd_stamp = tcp_jiffies32;
bpf_skops_established(sk, bpf_op, skb);
@@ -6147,6 +6152,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct tcp_fastopen_cookie foc = { .len = -1 };
int saved_clamp = tp->rx_opt.mss_clamp;
bool fastopen_fail;
+ SKB_DR(reason);
tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
@@ -6189,7 +6195,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
if (th->rst) {
tcp_reset(sk, skb);
- goto discard;
+consume:
+ __kfree_skb(skb);
+ return 0;
}
/* rfc793:
@@ -6199,9 +6207,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* See note below!
* --ANK(990513)
*/
- if (!th->syn)
+ if (!th->syn) {
+ SKB_DR_SET(reason, TCP_FLAGS);
goto discard_and_undo;
-
+ }
/* rfc793:
* "If the SYN bit is on ...
* are acceptable then ...
@@ -6278,13 +6287,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
-
-discard:
- tcp_drop(sk, skb);
- return 0;
- } else {
- tcp_send_ack(sk);
+ goto consume;
}
+ tcp_send_ack(sk);
return -1;
}
@@ -6296,15 +6301,16 @@ discard:
*
* Otherwise (no ACK) drop the segment and return."
*/
-
+ SKB_DR_SET(reason, TCP_RESET);
goto discard_and_undo;
}
/* PAWS check. */
if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
- tcp_paws_reject(&tp->rx_opt, 0))
+ tcp_paws_reject(&tp->rx_opt, 0)) {
+ SKB_DR_SET(reason, TCP_RFC7323_PAWS);
goto discard_and_undo;
-
+ }
if (th->syn) {
/* We see SYN without ACK. It is attempt of
* simultaneous connect with crossed SYNs.
@@ -6353,7 +6359,7 @@ discard:
*/
return -1;
#else
- goto discard;
+ goto consume;
#endif
}
/* "fifth, if neither of the SYN or RST bits is set then
@@ -6363,7 +6369,8 @@ discard:
discard_and_undo:
tcp_clear_options(&tp->rx_opt);
tp->rx_opt.mss_clamp = saved_clamp;
- goto discard;
+ tcp_drop_reason(sk, skb, reason);
+ return 0;
reset_and_undo:
tcp_clear_options(&tp->rx_opt);
@@ -6418,21 +6425,26 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
struct request_sock *req;
int queued = 0;
bool acceptable;
+ SKB_DR(reason);
switch (sk->sk_state) {
case TCP_CLOSE:
+ SKB_DR_SET(reason, TCP_CLOSE);
goto discard;
case TCP_LISTEN:
if (th->ack)
return 1;
- if (th->rst)
+ if (th->rst) {
+ SKB_DR_SET(reason, TCP_RESET);
goto discard;
-
+ }
if (th->syn) {
- if (th->fin)
+ if (th->fin) {
+ SKB_DR_SET(reason, TCP_FLAGS);
goto discard;
+ }
/* It is possible that we process SYN packets from backlog,
* so we need to make sure to disable BH and RCU right there.
*/
@@ -6447,6 +6459,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
consume_skb(skb);
return 0;
}
+ SKB_DR_SET(reason, TCP_FLAGS);
goto discard;
case TCP_SYN_SENT:
@@ -6473,13 +6486,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
- if (!tcp_check_req(sk, skb, req, true, &req_stolen))
+ if (!tcp_check_req(sk, skb, req, true, &req_stolen)) {
+ SKB_DR_SET(reason, TCP_FASTOPEN);
goto discard;
+ }
}
- if (!th->ack && !th->rst && !th->syn)
+ if (!th->ack && !th->rst && !th->syn) {
+ SKB_DR_SET(reason, TCP_FLAGS);
goto discard;
-
+ }
if (!tcp_validate_incoming(sk, skb, th, 0))
return 0;
@@ -6492,6 +6508,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV)
return 1; /* send one RST */
tcp_send_challenge_ack(sk);
+ SKB_DR_SET(reason, TCP_OLD_ACK);
goto discard;
}
switch (sk->sk_state) {
@@ -6585,7 +6602,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
inet_csk_reset_keepalive_timer(sk, tmo);
} else {
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
- goto discard;
+ goto consume;
}
break;
}
@@ -6593,7 +6610,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_CLOSING:
if (tp->snd_una == tp->write_seq) {
tcp_time_wait(sk, TCP_TIME_WAIT, 0);
- goto discard;
+ goto consume;
}
break;
@@ -6601,7 +6618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (tp->snd_una == tp->write_seq) {
tcp_update_metrics(sk);
tcp_done(sk);
- goto discard;
+ goto consume;
}
break;
}
@@ -6652,9 +6669,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (!queued) {
discard:
- tcp_drop(sk, skb);
+ tcp_drop_reason(sk, skb, reason);
}
return 0;
+
+consume:
+ __kfree_skb(skb);
+ return 0;
}
EXPORT_SYMBOL(tcp_rcv_state_process);
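Note: the drop-reason conversion in tcp_validate_incoming(), tcp_rcv_synsent_state_process() and tcp_rcv_state_process() above leans on the SKB_DR()/SKB_DR_SET()/SKB_DR_OR() helpers, which are not part of this hunk. A simplified sketch of what they are assumed to expand to (the real definitions live in the shared skb drop-reason header):

/* Sketch only: declare a reason defaulting to NOT_SPECIFIED, overwrite it
 * unconditionally with SKB_DR_SET(), or only-if-still-unspecified with
 * SKB_DR_OR().
 */
#define SKB_DR_INIT(name, reason) \
        enum skb_drop_reason name = SKB_DROP_REASON_##reason
#define SKB_DR(name)             SKB_DR_INIT(name, NOT_SPECIFIED)
#define SKB_DR_SET(name, reason) (name = SKB_DROP_REASON_##reason)
#define SKB_DR_OR(name, reason) \
        do { \
                if (name == SKB_DROP_REASON_NOT_SPECIFIED) \
                        SKB_DR_SET(name, reason); \
        } while (0)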
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f9cec624068d..918816ec5dd4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -229,9 +229,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
orig_dport = usin->sin_port;
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
- IPPROTO_TCP,
- orig_sport, orig_dport, sk);
+ sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport,
+ orig_dport, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
@@ -2066,7 +2065,6 @@ process:
sk_incoming_cpu_update(sk);
- sk_defer_free_flush(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
@@ -2621,7 +2619,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
- tp->snd_cwnd,
+ tcp_snd_cwnd(tp),
state == TCP_LISTEN ?
fastopenq->max_qlen :
(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 82b36ec3f2f8..ae36780977d2 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -297,7 +297,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
lp->flag &= ~LP_WITHIN_THR;
pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag,
- tp->snd_cwnd, lp->remote_hz, lp->owd_min, lp->owd_max,
+ tcp_snd_cwnd(tp), lp->remote_hz, lp->owd_min, lp->owd_max,
lp->sowd >> 3);
if (lp->flag & LP_WITHIN_THR)
@@ -313,12 +313,12 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
/* happened within inference
* drop snd_cwnd into 1 */
if (lp->flag & LP_WITHIN_INF)
- tp->snd_cwnd = 1U;
+ tcp_snd_cwnd_set(tp, 1U);
/* happened after inference
* cut snd_cwnd into half */
else
- tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
+ tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp) >> 1U, 1U));
/* record this drop time */
lp->last_drop = now;
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 0588b004ddac..7029b0e98edb 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -388,15 +388,15 @@ void tcp_update_metrics(struct sock *sk)
if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
- if (val && (tp->snd_cwnd >> 1) > val)
+ if (val && (tcp_snd_cwnd(tp) >> 1) > val)
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
- tp->snd_cwnd >> 1);
+ tcp_snd_cwnd(tp) >> 1);
}
if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
val = tcp_metric_get(tm, TCP_METRIC_CWND);
- if (tp->snd_cwnd > val)
+ if (tcp_snd_cwnd(tp) > val)
tcp_metric_set(tm, TCP_METRIC_CWND,
- tp->snd_cwnd);
+ tcp_snd_cwnd(tp));
}
} else if (!tcp_in_slow_start(tp) &&
icsk->icsk_ca_state == TCP_CA_Open) {
@@ -404,10 +404,10 @@ void tcp_update_metrics(struct sock *sk)
if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
- max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
+ max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
val = tcp_metric_get(tm, TCP_METRIC_CWND);
- tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
+ tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
}
} else {
/* Else slow start did not finish, cwnd is non-sense,
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index ab552356bdba..a60662f4bdf9 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -197,10 +197,10 @@ static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
}
if (ca->cwnd_growth_factor < 0) {
- cnt = tp->snd_cwnd << -ca->cwnd_growth_factor;
+ cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor;
tcp_cong_avoid_ai(tp, cnt, acked);
} else {
- cnt = max(4U, tp->snd_cwnd >> ca->cwnd_growth_factor);
+ cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor);
tcp_cong_avoid_ai(tp, cnt, acked);
}
}
@@ -209,7 +209,7 @@ static u32 tcpnv_recalc_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U);
+ return max((tcp_snd_cwnd(tp) * nv_loss_dec_factor) >> 10, 2U);
}
static void tcpnv_state(struct sock *sk, u8 new_state)
@@ -257,7 +257,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
return;
/* Stop cwnd growth if we were in catch up mode */
- if (ca->nv_catchup && tp->snd_cwnd >= nv_min_cwnd) {
+ if (ca->nv_catchup && tcp_snd_cwnd(tp) >= nv_min_cwnd) {
ca->nv_catchup = 0;
ca->nv_allow_cwnd_growth = 0;
}
@@ -371,7 +371,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
* if cwnd < max_win, grow cwnd
* else leave the same
*/
- if (tp->snd_cwnd > max_win) {
+ if (tcp_snd_cwnd(tp) > max_win) {
/* there is congestion, check that it is ok
* to make a CA decision
* 1. We should have at least nv_dec_eval_min_calls
@@ -398,20 +398,20 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
ca->nv_allow_cwnd_growth = 0;
tp->snd_ssthresh =
(nv_ssthresh_factor * max_win) >> 3;
- if (tp->snd_cwnd - max_win > 2) {
+ if (tcp_snd_cwnd(tp) - max_win > 2) {
/* gap > 2, we do exponential cwnd decrease */
int dec;
- dec = max(2U, ((tp->snd_cwnd - max_win) *
+ dec = max(2U, ((tcp_snd_cwnd(tp) - max_win) *
nv_cong_dec_mult) >> 7);
- tp->snd_cwnd -= dec;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - dec);
} else if (nv_cong_dec_mult > 0) {
- tp->snd_cwnd = max_win;
+ tcp_snd_cwnd_set(tp, max_win);
}
if (ca->cwnd_growth_factor > 0)
ca->cwnd_growth_factor = 0;
ca->nv_no_cong_cnt = 0;
- } else if (tp->snd_cwnd <= max_win - nv_pad_buffer) {
+ } else if (tcp_snd_cwnd(tp) <= max_win - nv_pad_buffer) {
/* There is no congestion, grow cwnd if allowed*/
if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls)
return;
@@ -444,8 +444,8 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
* (it wasn't before, if it is now is because nv
* decreased it).
*/
- if (tp->snd_cwnd < nv_min_cwnd)
- tp->snd_cwnd = nv_min_cwnd;
+ if (tcp_snd_cwnd(tp) < nv_min_cwnd)
+ tcp_snd_cwnd_set(tp, nv_min_cwnd);
}
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1ca2f28c9981..5f91a9536e00 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -143,7 +143,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
- u32 cwnd = tp->snd_cwnd;
+ u32 cwnd = tcp_snd_cwnd(tp);
tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
@@ -152,7 +152,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
cwnd >>= 1;
- tp->snd_cwnd = max(cwnd, restart_cwnd);
+ tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
tp->snd_cwnd_stamp = tcp_jiffies32;
tp->snd_cwnd_used = 0;
}
@@ -1014,7 +1014,7 @@ static void tcp_tsq_write(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (tp->lost_out > tp->retrans_out &&
- tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+ tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
tcp_mstamp_refresh(tp);
tcp_xmit_retransmit_queue(sk);
}
@@ -1861,9 +1861,9 @@ static void tcp_cwnd_application_limited(struct sock *sk)
/* Limited by application or receiver window. */
u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
u32 win_used = max(tp->snd_cwnd_used, init_win);
- if (win_used < tp->snd_cwnd) {
+ if (win_used < tcp_snd_cwnd(tp)) {
tp->snd_ssthresh = tcp_current_ssthresh(sk);
- tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
+ tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
}
tp->snd_cwnd_used = 0;
}
@@ -2044,7 +2044,7 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
return 1;
in_flight = tcp_packets_in_flight(tp);
- cwnd = tp->snd_cwnd;
+ cwnd = tcp_snd_cwnd(tp);
if (in_flight >= cwnd)
return 0;
@@ -2197,12 +2197,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
in_flight = tcp_packets_in_flight(tp);
BUG_ON(tcp_skb_pcount(skb) <= 1);
- BUG_ON(tp->snd_cwnd <= in_flight);
+ BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
/* From in_flight test above, we know that cwnd > in_flight. */
- cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
+ cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
limit = min(send_win, cong_win);
@@ -2216,7 +2216,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
if (win_divisor) {
- u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
+ u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
/* If at least some fraction of a window is available,
* just use it.
@@ -2346,7 +2346,7 @@ static int tcp_mtu_probe(struct sock *sk)
if (likely(!icsk->icsk_mtup.enabled ||
icsk->icsk_mtup.probe_size ||
inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
- tp->snd_cwnd < 11 ||
+ tcp_snd_cwnd(tp) < 11 ||
tp->rx_opt.num_sacks || tp->rx_opt.dsack))
return -1;
@@ -2382,7 +2382,7 @@ static int tcp_mtu_probe(struct sock *sk)
return 0;
/* Do we need to wait to drain cwnd? With none in flight, don't stall */
- if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
+ if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
if (!tcp_packets_in_flight(tp))
return -1;
else
@@ -2451,7 +2451,7 @@ static int tcp_mtu_probe(struct sock *sk)
if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
/* Decrement cwnd here because we are sending
* effectively two packets. */
- tp->snd_cwnd--;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
tcp_event_new_data_sent(sk, nskb);
icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
@@ -2709,7 +2709,7 @@ repair:
else
tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
- is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+ is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
if (likely(sent_pkts || is_cwnd_limited))
tcp_cwnd_validate(sk, is_cwnd_limited);
@@ -2819,7 +2819,7 @@ void tcp_send_loss_probe(struct sock *sk)
if (unlikely(!skb)) {
WARN_ONCE(tp->packets_out,
"invalid inflight: %u state %u cwnd %u mss %d\n",
- tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+ tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
inet_csk(sk)->icsk_pending = 0;
return;
}
@@ -3303,7 +3303,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (!hole)
tp->retransmit_skb_hint = skb;
- segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
+ segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
if (segs <= 0)
break;
sacked = TCP_SKB_CB(skb)->sacked;
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 9a8e014d9b5b..a8f6d9d06f2e 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -200,7 +200,7 @@ void tcp_rate_check_app_limited(struct sock *sk)
/* Nothing in sending host's qdisc queues or NIC tx queue. */
sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
/* We are not limited by CWND. */
- tcp_packets_in_flight(tp) < tp->snd_cwnd &&
+ tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) &&
/* All lost packets have been retransmitted. */
tp->lost_out <= tp->retrans_out)
tp->app_limited =
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index fd113f6226ef..48f30e7209f2 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -2,11 +2,6 @@
#include <linux/tcp.h>
#include <net/tcp.h>
-static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
-{
- return t1 > t2 || (t1 == t2 && after(seq1, seq2));
-}
-
static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -77,9 +72,9 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
!(scb->sacked & TCPCB_SACKED_RETRANS))
continue;
- if (!tcp_rack_sent_after(tp->rack.mstamp,
- tcp_skb_timestamp_us(skb),
- tp->rack.end_seq, scb->end_seq))
+ if (!tcp_skb_sent_after(tp->rack.mstamp,
+ tcp_skb_timestamp_us(skb),
+ tp->rack.end_seq, scb->end_seq))
break;
/* A packet is lost if it has not been s/acked beyond
@@ -140,8 +135,8 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
}
tp->rack.advanced = 1;
tp->rack.rtt_us = rtt_us;
- if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
- end_seq, tp->rack.end_seq)) {
+ if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
+ end_seq, tp->rack.end_seq)) {
tp->rack.mstamp = xmit_time;
tp->rack.end_seq = end_seq;
}
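Note: the two call sites above move from the file-local tcp_rack_sent_after() (deleted at the top of this file) to a shared tcp_skb_sent_after() helper. The assumption is that the helper keeps the deleted function's body and is simply hoisted into a common TCP header so other code can reuse it:

/* Sketch, assuming the body of the removed tcp_rack_sent_after() is
 * relocated unchanged to a shared header.
 */
static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}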
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 5842081bc8a2..862b96248a92 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -27,7 +27,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!acked)
return;
}
- tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+ tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
acked);
}
@@ -35,7 +35,7 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
+ return max(tcp_snd_cwnd(tp) - (tcp_snd_cwnd(tp)>>TCP_SCALABLE_MD_SCALE), 2U);
}
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index c8003c8aad2c..786848ad37ea 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
{
- return min(tp->snd_ssthresh, tp->snd_cwnd);
+ return min(tp->snd_ssthresh, tcp_snd_cwnd(tp));
}
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
@@ -217,14 +217,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
* This is:
* (actual rate in segments) * baseRTT
*/
- target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
+ target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT;
do_div(target_cwnd, rtt);
/* Calculate the difference between the window we had,
* and the window we would like to have. This quantity
* is the "Diff" from the Arizona Vegas papers.
*/
- diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
+ diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT;
if (diff > gamma && tcp_in_slow_start(tp)) {
/* Going too fast. Time to slow down
@@ -238,7 +238,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
* truncation robs us of full link
* utilization.
*/
- tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
+ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp),
+ (u32)target_cwnd + 1));
tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
} else if (tcp_in_slow_start(tp)) {
@@ -254,14 +255,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
/* The old window was too fast, so
* we slow down.
*/
- tp->snd_cwnd--;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
tp->snd_ssthresh
= tcp_vegas_ssthresh(tp);
} else if (diff < alpha) {
/* We don't have enough extra packets
* in the network, so speed up.
*/
- tp->snd_cwnd++;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
} else {
/* Sending just as fast as we
* should be.
@@ -269,10 +270,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
}
}
- if (tp->snd_cwnd < 2)
- tp->snd_cwnd = 2;
- else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
- tp->snd_cwnd = tp->snd_cwnd_clamp;
+ if (tcp_snd_cwnd(tp) < 2)
+ tcp_snd_cwnd_set(tp, 2);
+ else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
+ tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);
tp->snd_ssthresh = tcp_current_ssthresh(sk);
}
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index cd50a61c9976..366ff6f214b2 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -146,11 +146,11 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
rtt = veno->minrtt;
- target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
+ target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt;
target_cwnd <<= V_PARAM_SHIFT;
do_div(target_cwnd, rtt);
- veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
+ veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd;
if (tcp_in_slow_start(tp)) {
/* Slow start. */
@@ -164,15 +164,15 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
/* In the "non-congestive state", increase cwnd
* every rtt.
*/
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+ tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
} else {
/* In the "congestive state", increase cwnd
* every other rtt.
*/
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+ if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
if (veno->inc &&
- tp->snd_cwnd < tp->snd_cwnd_clamp) {
- tp->snd_cwnd++;
+ tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
veno->inc = 0;
} else
veno->inc = 1;
@@ -181,10 +181,10 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
tp->snd_cwnd_cnt += acked;
}
done:
- if (tp->snd_cwnd < 2)
- tp->snd_cwnd = 2;
- else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
- tp->snd_cwnd = tp->snd_cwnd_clamp;
+ if (tcp_snd_cwnd(tp) < 2)
+ tcp_snd_cwnd_set(tp, 2);
+ else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
+ tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);
}
/* Wipe the slate clean for the next rtt. */
/* veno->cntrtt = 0; */
@@ -199,10 +199,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
if (veno->diff < beta)
/* in "non-congestive state", cut cwnd by 1/5 */
- return max(tp->snd_cwnd * 4 / 5, 2U);
+ return max(tcp_snd_cwnd(tp) * 4 / 5, 2U);
else
/* in "congestive state", cut cwnd by 1/2 */
- return max(tp->snd_cwnd >> 1U, 2U);
+ return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
static struct tcp_congestion_ops tcp_veno __read_mostly = {
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index b2e05c4cea00..c6e97141eef2 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -244,7 +244,8 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
switch (event) {
case CA_EVENT_COMPLETE_CWR:
- tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+ tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+ tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
break;
case CA_EVENT_LOSS:
tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 07c4c93b9fdb..18b07ff5d20e 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -71,11 +71,11 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!yeah->doing_reno_now) {
/* Scalable */
- tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+ tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
acked);
} else {
/* Reno */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+ tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
@@ -130,7 +130,7 @@ do_vegas:
/* Compute excess number of packets above bandwidth
* Avoid doing full 64 bit divide.
*/
- bw = tp->snd_cwnd;
+ bw = tcp_snd_cwnd(tp);
bw *= rtt - yeah->vegas.baseRTT;
do_div(bw, rtt);
queue = bw;
@@ -138,20 +138,20 @@ do_vegas:
if (queue > TCP_YEAH_ALPHA ||
rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
if (queue > TCP_YEAH_ALPHA &&
- tp->snd_cwnd > yeah->reno_count) {
+ tcp_snd_cwnd(tp) > yeah->reno_count) {
u32 reduction = min(queue / TCP_YEAH_GAMMA ,
- tp->snd_cwnd >> TCP_YEAH_EPSILON);
+ tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON);
- tp->snd_cwnd -= reduction;
+ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - reduction);
- tp->snd_cwnd = max(tp->snd_cwnd,
- yeah->reno_count);
+ tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp),
+ yeah->reno_count));
- tp->snd_ssthresh = tp->snd_cwnd;
+ tp->snd_ssthresh = tcp_snd_cwnd(tp);
}
if (yeah->reno_count <= 2)
- yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
+ yeah->reno_count = max(tcp_snd_cwnd(tp)>>1, 2U);
else
yeah->reno_count++;
@@ -176,7 +176,7 @@ do_vegas:
*/
yeah->vegas.beg_snd_una = yeah->vegas.beg_snd_nxt;
yeah->vegas.beg_snd_nxt = tp->snd_nxt;
- yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;
+ yeah->vegas.beg_snd_cwnd = tcp_snd_cwnd(tp);
/* Wipe the slate clean for the next RTT. */
yeah->vegas.cntRTT = 0;
@@ -193,16 +193,16 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
if (yeah->doing_reno_now < TCP_YEAH_RHO) {
reduction = yeah->lastQ;
- reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));
+ reduction = min(reduction, max(tcp_snd_cwnd(tp)>>1, 2U));
- reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
+ reduction = max(reduction, tcp_snd_cwnd(tp) >> TCP_YEAH_DELTA);
} else
- reduction = max(tp->snd_cwnd>>1, 2U);
+ reduction = max(tcp_snd_cwnd(tp)>>1, 2U);
yeah->fast_count = 0;
yeah->reno_count = max(yeah->reno_count>>1, 2U);
- return max_t(int, tp->snd_cwnd - reduction, 2);
+ return max_t(int, tcp_snd_cwnd(tp) - reduction, 2);
}
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
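Note: every congestion-control module above (lp, nv, scalable, vegas, veno, westwood, yeah) now goes through tcp_snd_cwnd()/tcp_snd_cwnd_set() instead of touching tp->snd_cwnd directly. A minimal sketch of such an accessor pair, assuming the real definitions (with whatever sanity checking they add) sit in the TCP header outside this diff:

/* Sketch only: funnel all cwnd reads/writes through one pair of helpers
 * so that a debug check (or a future field-layout change) needs a single
 * edit instead of touching every congestion-control module.
 */
static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
        return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
        WARN_ON_ONCE((int)val <= 0);    /* assumed sanity check */
        tp->snd_cwnd = val;
}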
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6b4d8361560f..9d5071c79c95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1726,7 +1726,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
EXPORT_SYMBOL(udp_ioctl);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *off, int *err)
+ int *off, int *err)
{
struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
struct sk_buff_head *queue;
@@ -1735,7 +1735,6 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
int error;
queue = &udp_sk(sk)->reader_queue;
- flags |= noblock ? MSG_DONTWAIT : 0;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
struct sk_buff *skb;
@@ -1805,7 +1804,7 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
struct sk_buff *skb;
int err, used;
- skb = skb_recv_udp(sk, 0, 1, &err);
+ skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
if (!skb)
return err;
@@ -1843,8 +1842,8 @@ EXPORT_SYMBOL(udp_read_sock);
* return it, otherwise we block.
*/
-int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
@@ -1859,7 +1858,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
try_again:
off = sk_peek_offset(sk, flags);
- skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
+ skb = __skb_recv_udp(sk, flags, &off, &err);
if (!skb)
return err;
@@ -1910,7 +1909,7 @@ try_again:
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
/* Copy the address. */
if (sin) {
diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
index bbe6569c9ad3..ff15918b7bdc 100644
--- a/net/ipv4/udp_bpf.c
+++ b/net/ipv4/udp_bpf.c
@@ -11,14 +11,13 @@
static struct proto *udpv6_prot_saved __read_mostly;
static int sk_udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
- return udpv6_prot_saved->recvmsg(sk, msg, len, noblock, flags,
- addr_len);
+ return udpv6_prot_saved->recvmsg(sk, msg, len, flags, addr_len);
#endif
- return udp_prot.recvmsg(sk, msg, len, noblock, flags, addr_len);
+ return udp_prot.recvmsg(sk, msg, len, flags, addr_len);
}
static bool udp_sk_has_data(struct sock *sk)
@@ -61,7 +60,7 @@ static int udp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
}
static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct sk_psock *psock;
int copied, ret;
@@ -71,10 +70,10 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
psock = sk_psock_get(sk);
if (unlikely(!psock))
- return sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ return sk_udp_recvmsg(sk, msg, len, flags, addr_len);
if (!psock_has_data(psock)) {
- ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len);
goto out;
}
@@ -84,12 +83,12 @@ msg_bytes_ready:
long timeo;
int data;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = udp_msg_wait_data(sk, psock, timeo);
if (data) {
if (psock_has_data(psock))
goto msg_bytes_ready;
- ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len);
goto out;
}
copied = -EAGAIN;
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 2878d8285caf..4ba7a88a1b1d 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -17,8 +17,8 @@ int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
int udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len);
+int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len);
int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
void udp_destroy_sock(struct sock *sk);
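Note: with the separate noblock/nonblock argument dropped from __skb_recv_udp(), udp_recvmsg() and the BPF recvmsg path above, non-blocking behaviour now travels only in the flags word. An illustrative caller of the new prototype (the function name here is made up for the example, not part of the patch):

/* Illustrative only: a non-blocking receive is requested purely via
 * MSG_DONTWAIT in flags; there is no longer a separate noblock argument.
 */
static int example_udp_recv_nowait(struct sock *sk, struct msghdr *msg,
                                   size_t len, int *addr_len)
{
        return udp_recvmsg(sk, msg, len, MSG_DONTWAIT, addr_len);
}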
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b22504176588..cde242dca530 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -335,7 +335,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
{
int i;
- idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
+ idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
if (!idev->stats.ipv6)
goto err_ip;
@@ -351,7 +351,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
if (!idev->stats.icmpv6dev)
goto err_icmp;
idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!idev->stats.icmpv6msgdev)
goto err_icmpmsg;
@@ -375,7 +375,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
return ERR_PTR(-EINVAL);
- ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT);
if (!ndev)
return ERR_PTR(err);
@@ -797,6 +797,7 @@ static void dev_forward_change(struct inet6_dev *idev)
{
struct net_device *dev;
struct inet6_ifaddr *ifa;
+ LIST_HEAD(tmp_addr_list);
if (!idev)
return;
@@ -815,14 +816,24 @@ static void dev_forward_change(struct inet6_dev *idev)
}
}
+ read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (ifa->flags&IFA_F_TENTATIVE)
continue;
+ list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
+ }
+ read_unlock_bh(&idev->lock);
+
+ while (!list_empty(&tmp_addr_list)) {
+ ifa = list_first_entry(&tmp_addr_list,
+ struct inet6_ifaddr, if_list_aux);
+ list_del(&ifa->if_list_aux);
if (idev->cnf.forwarding)
addrconf_join_anycast(ifa);
else
addrconf_leave_anycast(ifa);
}
+
inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
NETCONFA_FORWARDING,
dev->ifindex, &idev->cnf);
@@ -3728,7 +3739,8 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
struct net *net = dev_net(dev);
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa, *tmp;
+ struct inet6_ifaddr *ifa;
+ LIST_HEAD(tmp_addr_list);
bool keep_addr = false;
bool was_ready;
int state, i;
@@ -3820,16 +3832,23 @@ restart:
write_lock_bh(&idev->lock);
}
- list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+ list_for_each_entry(ifa, &idev->addr_list, if_list)
+ list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
+ write_unlock_bh(&idev->lock);
+
+ while (!list_empty(&tmp_addr_list)) {
struct fib6_info *rt = NULL;
bool keep;
+ ifa = list_first_entry(&tmp_addr_list,
+ struct inet6_ifaddr, if_list_aux);
+ list_del(&ifa->if_list_aux);
+
addrconf_del_dad_work(ifa);
keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
!addr_is_local(&ifa->addr);
- write_unlock_bh(&idev->lock);
spin_lock_bh(&ifa->lock);
if (keep) {
@@ -3860,15 +3879,14 @@ restart:
addrconf_leave_solict(ifa->idev, &ifa->addr);
}
- write_lock_bh(&idev->lock);
if (!keep) {
+ write_lock_bh(&idev->lock);
list_del_rcu(&ifa->if_list);
+ write_unlock_bh(&idev->lock);
in6_ifa_put(ifa);
}
}
- write_unlock_bh(&idev->lock);
-
/* Step 5: Discard anycast and multicast list */
if (unregister) {
ipv6_ac_destroy_dev(idev);
@@ -4201,7 +4219,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
ifp->idev->cnf.rtr_solicits != 0 &&
- (dev->flags&IFF_LOOPBACK) == 0;
+ (dev->flags & IFF_LOOPBACK) == 0 &&
+ (dev->type != ARPHRD_TUNNEL);
read_unlock_bh(&ifp->idev->lock);
/* While dad is in progress mld report's source address is in6_addrany.
@@ -5569,6 +5588,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
+ array[DEVCONF_ACCEPT_UNSOLICITED_NA] = cnf->accept_unsolicited_na;
}
static inline size_t inet6_ifla6_size(void)
@@ -7020,6 +7040,15 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra2 = (void *)SYSCTL_ONE,
},
{
+ .procname = "accept_unsolicited_na",
+ .data = &ipv6_devconf.accept_unsolicited_na,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *)SYSCTL_ZERO,
+ .extra2 = (void *)SYSCTL_ONE,
+ },
+ {
/* sentinel */
}
};
@@ -7031,7 +7060,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
struct ctl_table *table;
char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
- table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
+ table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
if (!table)
goto out;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7d7b7523d126..70564ddccc46 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -318,7 +318,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
/* Binding to v4-mapped address on a v6-only socket
* makes no sense
*/
- if (sk->sk_ipv6only) {
+ if (ipv6_only_sock(sk)) {
err = -EINVAL;
goto out;
}
@@ -654,7 +654,7 @@ int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
INDIRECT_CALLABLE_DECLARE(int udpv6_recvmsg(struct sock *, struct msghdr *,
- size_t, int, int, int *));
+ size_t, int, int *));
int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
@@ -669,8 +669,7 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
prot = READ_ONCE(sk->sk_prot);
err = INDIRECT_CALL_2(prot->recvmsg, tcp_recvmsg, udpv6_recvmsg,
- sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, &addr_len);
+ sk, msg, size, flags, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 206f66310a88..39b2327edc4e 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -145,7 +145,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
int err;
if (usin->sin6_family == AF_INET) {
- if (__ipv6_only_sock(sk))
+ if (ipv6_only_sock(sk))
return -EAFNOSUPPORT;
err = __ip4_datagram_connect(sk, uaddr, addr_len);
goto ipv4_connected;
@@ -178,7 +178,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
if (addr_type & IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
- if (__ipv6_only_sock(sk)) {
+ if (ipv6_only_sock(sk)) {
err = -ENETUNREACH;
goto out;
}
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 658d5eabaf7e..a8d961d3a477 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -90,12 +90,13 @@ static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
break;
fallthrough;
case 2: /* send ICMP PARM PROB regardless and drop packet */
- icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
+ icmpv6_param_prob_reason(skb, ICMPV6_UNK_OPTION, optoff,
+ SKB_DROP_REASON_UNHANDLED_PROTO);
return false;
}
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
return false;
}
@@ -218,7 +219,7 @@ static bool ip6_parse_tlv(bool hopbyhop,
if (len == 0)
return true;
bad:
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
return false;
}
@@ -232,6 +233,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
struct ipv6_destopt_hao *hao;
struct inet6_skb_parm *opt = IP6CB(skb);
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ SKB_DR(reason);
int ret;
if (opt->dsthao) {
@@ -246,19 +248,23 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
if (hao->length != 16) {
net_dbg_ratelimited("hao invalid option length = %d\n",
hao->length);
+ SKB_DR_SET(reason, IP_INHDR);
goto discard;
}
if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
&hao->addr);
+ SKB_DR_SET(reason, INVALID_PROTO);
goto discard;
}
ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
(xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ SKB_DR_SET(reason, XFRM_POLICY);
goto discard;
+ }
if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
@@ -281,7 +287,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
return true;
discard:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return false;
}
#endif
@@ -487,7 +493,6 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
struct inet6_dev *idev;
struct ipv6hdr *oldhdr;
- struct in6_addr addr;
unsigned char *buf;
int accept_rpl_seg;
int i, err;
@@ -616,9 +621,7 @@ looped_back:
return -1;
}
- addr = ipv6_hdr(skb)->daddr;
- ipv6_hdr(skb)->daddr = ohdr->rpl_segaddr[i];
- ohdr->rpl_segaddr[i] = addr;
+ swap(ipv6_hdr(skb)->daddr, ohdr->rpl_segaddr[i]);
ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n);
@@ -934,7 +937,7 @@ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
}
net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
nh[optoff + 1]);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
return false;
}
@@ -988,7 +991,7 @@ ignore:
return true;
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
return false;
}
@@ -997,31 +1000,30 @@ drop:
static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
- struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
- struct net *net = ipv6_skb_net(skb);
+ SKB_DR(reason);
u32 pkt_len;
if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
nh[optoff+1]);
- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+ SKB_DR_SET(reason, IP_INHDR);
goto drop;
}
pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
if (pkt_len <= IPV6_MAXPLEN) {
- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
+ icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff + 2,
+ SKB_DROP_REASON_IP_INHDR);
return false;
}
if (ipv6_hdr(skb)->payload_len) {
- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
+ icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff,
+ SKB_DROP_REASON_IP_INHDR);
return false;
}
if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+ SKB_DR_SET(reason, PKT_TOO_SMALL);
goto drop;
}
@@ -1032,7 +1034,7 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
return true;
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return false;
}
@@ -1054,7 +1056,7 @@ static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
return true;
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
return false;
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index e6b978ea0e87..61770220774e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -629,12 +629,13 @@ out_bh_enable:
}
EXPORT_SYMBOL(icmp6_send);
-/* Slightly more convenient version of icmp6_send.
+/* Slightly more convenient version of icmp6_send with drop reasons.
*/
-void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+void icmpv6_param_prob_reason(struct sk_buff *skb, u8 code, int pos,
+ enum skb_drop_reason reason)
{
icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb));
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
}
/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
@@ -864,21 +865,23 @@ out:
static int icmpv6_rcv(struct sk_buff *skb)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct net *net = dev_net(skb->dev);
struct net_device *dev = icmp6_dev(skb);
struct inet6_dev *idev = __in6_dev_get(dev);
const struct in6_addr *saddr, *daddr;
struct icmp6hdr *hdr;
u8 type;
- bool success = false;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
int nh;
if (!(sp && sp->xvec[sp->len - 1]->props.flags &
- XFRM_STATE_ICMP))
+ XFRM_STATE_ICMP)) {
+ reason = SKB_DROP_REASON_XFRM_POLICY;
goto drop_no_count;
+ }
if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
goto drop_no_count;
@@ -886,8 +889,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
nh = skb_network_offset(skb);
skb_set_network_header(skb, sizeof(*hdr));
- if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
+ if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN,
+ skb)) {
+ reason = SKB_DROP_REASON_XFRM_POLICY;
goto drop_no_count;
+ }
skb_set_network_header(skb, nh);
}
@@ -924,11 +930,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
break;
case ICMPV6_ECHO_REPLY:
- success = ping_rcv(skb);
+ reason = ping_rcv(skb);
break;
case ICMPV6_EXT_ECHO_REPLY:
- success = ping_rcv(skb);
+ reason = ping_rcv(skb);
break;
case ICMPV6_PKT_TOOBIG:
@@ -994,19 +1000,20 @@ static int icmpv6_rcv(struct sk_buff *skb)
/* until the v6 path can be better sorted assume failure and
* preserve the status quo behaviour for the rest of the paths to here
*/
- if (success)
- consume_skb(skb);
+ if (reason)
+ kfree_skb_reason(skb, reason);
else
- kfree_skb(skb);
+ consume_skb(skb);
return 0;
csum_error:
+ reason = SKB_DROP_REASON_ICMP_CSUM;
__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return 0;
}
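Note: icmpv6_param_prob_reason() above takes over the job of sending the parameter-problem ICMP and freeing the skb with a drop reason. Callers that have not yet been converted presumably keep the old icmpv6_param_prob() name through a thin wrapper along these lines (a sketch, not shown in this diff):

/* Sketch: unconverted callers keep the old name and fall back to the
 * generic NOT_SPECIFIED drop reason.
 */
static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
        icmpv6_param_prob_reason(skb, code, pos,
                                 SKB_DROP_REASON_NOT_SPECIFIED);
}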
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 5136959b3dc5..4e37f7c29900 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -382,11 +382,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
goto failed_free;
ip6gre_tnl_link_config(nt, 1);
-
- /* Can use a lockless transmit, unless we generate output sequences */
- if (!(nt->parms.o_flags & TUNNEL_SEQ))
- dev->features |= NETIF_F_LLTX;
-
ip6gre_tunnel_link(ign, nt);
return nt;
@@ -1445,26 +1440,23 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
static void ip6gre_tnl_init_features(struct net_device *dev)
{
struct ip6_tnl *nt = netdev_priv(dev);
+ __be16 flags;
- dev->features |= GRE6_FEATURES;
+ dev->features |= GRE6_FEATURES | NETIF_F_LLTX;
dev->hw_features |= GRE6_FEATURES;
- if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
- /* TCP offload with GRE SEQ is not supported, nor
- * can we support 2 levels of outer headers requiring
- * an update.
- */
- if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
- nt->encap.type == TUNNEL_ENCAP_NONE) {
- dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- }
+ flags = nt->parms.o_flags;
- /* Can use a lockless transmit, unless we generate
- * output sequences
- */
- dev->features |= NETIF_F_LLTX;
- }
+ /* TCP offload with GRE SEQ is not supported, nor can we support 2
+ * levels of outer headers requiring an update.
+ */
+ if (flags & TUNNEL_SEQ)
+ return;
+ if (flags & TUNNEL_CSUM && nt->encap.type != TUNNEL_ENCAP_NONE)
+ return;
+
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b5ea35635f9..0322cc86b84e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -145,12 +145,14 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
struct net *net)
{
+ enum skb_drop_reason reason;
const struct ipv6hdr *hdr;
u32 pkt_len;
struct inet6_dev *idev;
if (skb->pkt_type == PACKET_OTHERHOST) {
- kfree_skb(skb);
+ dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
+ kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST);
return NULL;
}
@@ -160,9 +162,12 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
+ SKB_DR_SET(reason, NOT_SPECIFIED);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
!idev || unlikely(idev->cnf.disable_ipv6)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+ if (idev && unlikely(idev->cnf.disable_ipv6))
+ SKB_DR_SET(reason, IPV6DISABLED);
goto drop;
}
@@ -186,8 +191,10 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
hdr = ipv6_hdr(skb);
- if (hdr->version != 6)
+ if (hdr->version != 6) {
+ SKB_DR_SET(reason, UNHANDLED_PROTO);
goto err;
+ }
__IP6_ADD_STATS(net, idev,
IPSTATS_MIB_NOECTPKTS +
@@ -225,8 +232,10 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (!ipv6_addr_is_multicast(&hdr->daddr) &&
(skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) &&
- idev->cnf.drop_unicast_in_l2_multicast)
+ idev->cnf.drop_unicast_in_l2_multicast) {
+ SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
goto err;
+ }
/* RFC4291 2.7
* Nodes must not originate a packet to a multicast address whose scope
@@ -255,12 +264,11 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
__IP6_INC_STATS(net,
idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+ SKB_DR_SET(reason, PKT_TOO_SMALL);
goto drop;
}
- if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
- goto drop;
- }
+ if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
+ goto err;
hdr = ipv6_hdr(skb);
}
@@ -281,9 +289,10 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
return skb;
err:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+ SKB_DR_OR(reason, IP_INHDR);
drop:
rcu_read_unlock();
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return NULL;
}
@@ -353,6 +362,7 @@ void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
const struct inet6_protocol *ipprot;
struct inet6_dev *idev;
unsigned int nhoff;
+ SKB_DR(reason);
bool raw;
/*
@@ -412,12 +422,16 @@ resubmit_final:
if (ipv6_addr_is_multicast(&hdr->daddr) &&
!ipv6_chk_mcast_addr(dev, &hdr->daddr,
&hdr->saddr) &&
- !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
+ !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) {
+ SKB_DR_SET(reason, IP_INADDRERRORS);
goto discard;
+ }
}
if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
- !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+ !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ SKB_DR_SET(reason, XFRM_POLICY);
goto discard;
+ }
ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
skb);
@@ -443,8 +457,11 @@ resubmit_final:
IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_UNK_NEXTHDR, nhoff);
+ SKB_DR_SET(reason, IP_NOPROTO);
+ } else {
+ SKB_DR_SET(reason, XFRM_POLICY);
}
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
} else {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
consume_skb(skb);
@@ -454,7 +471,7 @@ resubmit_final:
discard:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
}
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index fa63ef2bd99c..afa5bd4ad167 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -119,19 +119,21 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
rcu_read_lock_bh();
nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
- if (unlikely(!neigh))
- neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
- if (!IS_ERR(neigh)) {
- sock_confirm_neigh(skb, neigh);
- ret = neigh_output(neigh, skb, false);
- rcu_read_unlock_bh();
- return ret;
+
+ if (unlikely(IS_ERR_OR_NULL(neigh))) {
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
+ if (IS_ERR(neigh)) {
+ rcu_read_unlock_bh();
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
+ return -EINVAL;
+ }
}
+ sock_confirm_neigh(skb, neigh);
+ ret = neigh_output(neigh, skb, false);
rcu_read_unlock_bh();
-
- IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
- kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
- return -EINVAL;
+ return ret;
}
static int
@@ -198,7 +200,6 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
switch (ret) {
case NET_XMIT_SUCCESS:
- return __ip6_finish_output(net, sk, skb);
case NET_XMIT_CN:
return __ip6_finish_output(net, sk, skb) ? : ret;
default:
@@ -469,6 +470,7 @@ int ip6_forward(struct sk_buff *skb)
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
struct inet6_dev *idev;
+ SKB_DR(reason);
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
@@ -518,7 +520,7 @@ int ip6_forward(struct sk_buff *skb)
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR);
return -ETIMEDOUT;
}
@@ -537,6 +539,7 @@ int ip6_forward(struct sk_buff *skb)
if (!xfrm6_route_forward(skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+ SKB_DR_SET(reason, XFRM_POLICY);
goto drop;
}
dst = skb_dst(skb);
@@ -596,7 +599,7 @@ int ip6_forward(struct sk_buff *skb)
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_FRAGFAILS);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
return -EMSGSIZE;
}
@@ -618,8 +621,9 @@ int ip6_forward(struct sk_buff *skb)
error:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
+ SKB_DR_SET(reason, IP_INADDRERRORS);
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return -EINVAL;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 53f632a560ec..19325b7600bb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -257,8 +257,6 @@ static int ip6_tnl_create2(struct net_device *dev)
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
int err;
- t = netdev_priv(dev);
-
dev->rtnl_link_ops = &ip6_link_ops;
err = register_netdevice(dev);
if (err < 0)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index fcb288b0ae13..254addad0dd3 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -979,6 +979,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
struct inet6_dev *idev = __in6_dev_get(dev);
struct inet6_ifaddr *ifp;
struct neighbour *neigh;
+ bool create_neigh;
if (skb->len < sizeof(struct nd_msg)) {
ND_PRINTK(2, warn, "NA: packet too short\n");
@@ -999,6 +1000,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
/* For some 802.11 wireless deployments (and possibly other networks),
* there will be a NA proxy and unsolicited packets are attacks
* and thus should not be accepted.
+ * drop_unsolicited_na takes precedence over accept_unsolicited_na
*/
if (!msg->icmph.icmp6_solicited && idev &&
idev->cnf.drop_unsolicited_na)
@@ -1039,7 +1041,23 @@ static void ndisc_recv_na(struct sk_buff *skb)
in6_ifa_put(ifp);
return;
}
- neigh = neigh_lookup(&nd_tbl, &msg->target, dev);
+ /* RFC 9131 updates original Neighbour Discovery RFC 4861.
+ * An unsolicited NA can now create a neighbour cache entry
+ * on routers if it has Target LL Address option.
+ *
+ * drop accept fwding behaviour
+ * ---- ------ ------ ----------------------------------------------
+ * 1 X X Drop NA packet and don't pass up the stack
+ * 0 0 X Pass NA packet up the stack, don't update NC
+ * 0 1 0 Pass NA packet up the stack, don't update NC
+ * 0 1 1 Pass NA packet up the stack, and add a STALE
+ * NC entry
+ * Note that we don't do a (daddr == all-routers-mcast) check.
+ */
+ create_neigh = !msg->icmph.icmp6_solicited && lladdr &&
+ idev && idev->cnf.forwarding &&
+ idev->cnf.accept_unsolicited_na;
+ neigh = __neigh_lookup(&nd_tbl, &msg->target, dev, create_neigh);
if (neigh) {
u8 old_flags = neigh->flags;
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index b3f163b40c2b..8970d0b4faeb 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -30,6 +30,10 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv,
fl6->daddr = iph->daddr;
fl6->saddr = iph->saddr;
} else {
+ if (nft_hook(pkt) == NF_INET_FORWARD &&
+ priv->flags & NFTA_FIB_F_IIF)
+ fl6->flowi6_iif = nft_out(pkt)->ifindex;
+
fl6->daddr = iph->saddr;
fl6->saddr = iph->daddr;
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c51d5ce3711c..3b7cbd522b54 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -460,7 +460,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
*/
static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
@@ -477,7 +477,7 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@ -512,7 +512,7 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
*addr_len = sizeof(*sin6);
}
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c4b6ce017d5e..d25dc83bac62 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4483,6 +4483,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
struct dst_entry *dst = skb_dst(skb);
struct net *net = dev_net(dst->dev);
struct inet6_dev *idev;
+ SKB_DR(reason);
int type;
if (netif_is_l3_master(skb->dev) ||
@@ -4495,11 +4496,14 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
case IPSTATS_MIB_INNOROUTES:
type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
if (type == IPV6_ADDR_ANY) {
+ SKB_DR_SET(reason, IP_INADDRERRORS);
IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
break;
}
+ SKB_DR_SET(reason, IP_INNOROUTES);
fallthrough;
case IPSTATS_MIB_OUTNOROUTES:
+ SKB_DR_OR(reason, IP_OUTNOROUTES);
IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
break;
}
@@ -4509,7 +4513,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
skb_dst_drop(skb);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return 0;
}
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index d53dd142bf87..94a0a294c6a1 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -23,8 +23,6 @@
#endif
#include <linux/ioam6.h>
-static int two = 2;
-static int three = 3;
static int flowlabel_reflect_max = 0x7;
static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
static u32 rt6_multipath_hash_fields_all_mask =
@@ -172,7 +170,7 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_rt6_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = &three,
+ .extra2 = SYSCTL_THREE,
},
{
.procname = "fib_multipath_hash_fields",
@@ -197,7 +195,7 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{
.procname = "ioam6_id",
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 13678d3908fa..60bdec257ba7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -230,7 +230,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
- if (__ipv6_only_sock(sk))
+ if (ipv6_only_sock(sk))
return -ENETUNREACH;
sin.sin_family = AF_INET;
@@ -1728,7 +1728,6 @@ process:
sk_incoming_cpu_update(sk);
- sk_defer_free_flush(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
@@ -2044,7 +2043,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
- tp->snd_cwnd,
+ tcp_snd_cwnd(tp),
state == TCP_LISTEN ?
fastopenq->max_qlen :
(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
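
Two tree-wide renames are visible in the TCP hunks: __ipv6_only_sock() is now spelled ipv6_only_sock(), and direct reads of tp->snd_cwnd go through the new tcp_snd_cwnd() accessor, which funnels all congestion-window accesses through one place so sanity checks can be added centrally. The accessor pair is presumably a thin wrapper along these lines (a sketch, not the exact include/net/tcp.h contents):

/* sketch of the accessor pair; the real definitions live in include/net/tcp.h */
static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
        return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
        WARN_ON_ONCE(val == 0);         /* a zero cwnd would stall the socket */
        tp->snd_cwnd = val;
}
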
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 7f0fa9bd9ffe..3fc97d4621ac 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -322,7 +322,7 @@ static int udp6_skb_len(struct sk_buff *skb)
*/
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
@@ -342,7 +342,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
try_again:
off = sk_peek_offset(sk, flags);
- skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
+ skb = __skb_recv_udp(sk, flags, &off, &err);
if (!skb)
return err;
@@ -391,7 +391,7 @@ try_again:
if (!peeking)
SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
/* Copy the address. */
if (msg->msg_name) {
@@ -1123,7 +1123,7 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
* bytes that are out of the bound specified by user in addr_len.
*/
if (uaddr->sa_family == AF_INET) {
- if (__ipv6_only_sock(sk))
+ if (ipv6_only_sock(sk))
return -EAFNOSUPPORT;
return udp_pre_connect(sk, uaddr, addr_len);
}
@@ -1359,7 +1359,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
- if (__ipv6_only_sock(sk))
+ if (ipv6_only_sock(sk))
return -ENETUNREACH;
return udp_sendmsg(sk, msg, len);
}
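
From user space nothing changes with the recvmsg rework: non-blocking reception is still requested per call with MSG_DONTWAIT (or per socket with O_NONBLOCK); the kernel merely stops carrying that bit as a separate internal parameter. A small, self-contained user-space illustration:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);
        struct sockaddr_in6 addr = {
                .sin6_family = AF_INET6,
                .sin6_addr = IN6ADDR_LOOPBACK_INIT,     /* ephemeral port */
        };
        char buf[1500];
        ssize_t n;

        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("setup");
                return 1;
        }

        /* MSG_DONTWAIT: return immediately instead of sleeping for data */
        n = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
        if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
                printf("no datagram queued yet\n");
        else if (n >= 0)
                printf("got %zd bytes\n", n);

        close(fd);
        return 0;
}
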
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index b2fcc46c1630..4251e49d32a0 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -20,8 +20,8 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
-int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len);
+int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ int *addr_len);
void udpv6_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a1760add5bf1..a0385ddbffcf 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1223,7 +1223,6 @@ static void iucv_process_message_q(struct sock *sk)
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
- int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
@@ -1242,7 +1241,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
/* receive/dequeue next skb:
* the function understands MSG_PEEK and, thus, does not dequeue skb */
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index fd51db3be91c..175a162eec58 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3696,7 +3696,7 @@ static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
goto out;
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (skb == NULL)
goto out;
@@ -3711,7 +3711,7 @@ static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (err)
goto out_free;
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
err = (flags & MSG_TRUNC) ? skb->len : copied;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index b3edafa5fba4..6af09e188e52 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -515,7 +515,7 @@ no_route:
}
static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len)
+ size_t len, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
@@ -526,7 +526,7 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
if (flags & MSG_OOB)
goto out;
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 96f975777438..217c7192691e 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -657,7 +657,7 @@ do_confirm:
}
static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
@@ -671,7 +671,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index bf35710127dd..8be1fdc68a0b 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -191,8 +191,7 @@ static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg,
goto end;
err = 0;
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto end;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 218cdc554d71..bfab39320004 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -263,7 +263,7 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
- if (sta->sta.he_cap.has_he && addbaext)
+ if (sta->sta.deflink.he_cap.has_he && addbaext)
ieee80211_add_addbaext(sdata, skb, addbaext, buf_size);
ieee80211_tx_skb(sdata, skb);
@@ -296,7 +296,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
goto end;
}
- if (!sta->sta.ht_cap.ht_supported &&
+ if (!sta->sta.deflink.ht_cap.ht_supported &&
sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ) {
ht_dbg(sta->sdata,
"STA %pM erroneously requests BA session on tid %d w/o QoS\n",
@@ -312,9 +312,9 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
goto end;
}
- if (sta->sta.eht_cap.has_eht)
+ if (sta->sta.deflink.eht_cap.has_eht)
max_buf_size = IEEE80211_MAX_AMPDU_BUF_EHT;
- else if (sta->sta.he_cap.has_he)
+ else if (sta->sta.deflink.he_cap.has_he)
max_buf_size = IEEE80211_MAX_AMPDU_BUF_HE;
else
max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
@@ -324,7 +324,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
* and if buffer size does not exceeds max value */
/* XXX: check own ht delayed BA capability?? */
if (((ba_policy != 1) &&
- (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
+ (!(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
(buf_size > max_buf_size)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
ht_dbg_ratelimited(sta->sdata,
@@ -507,7 +507,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
goto free;
}
- if (sta->sta.eht_cap.has_eht && elems && elems->addba_ext_ie) {
+ if (sta->sta.deflink.eht_cap.has_eht && elems && elems->addba_ext_ie) {
u8 buf_size_1k = u8_get_bits(elems->addba_ext_ie->data,
IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
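
The bulk of the mac80211 changes from here on are one mechanical refactor in preparation for IEEE 802.11be multi-link operation (MLO): per-link station state (HT/VHT/HE/EHT capabilities, bandwidth, supported rates, group keys, rx/tx/status statistics) moves out of the top level of struct sta_info and struct ieee80211_sta into an embedded default link, so sta->sta.ht_cap becomes sta->sta.deflink.ht_cap, sta->rx_stats becomes sta->deflink.rx_stats, and so on. For a non-MLD station the default link is the only link, and sta->link[0] / sta->sta.link[0] simply point back at deflink, as the sta_info_alloc() hunk later in this series sets up. A hedged sketch of the access pattern, with a made-up helper name:

/* sketch: reading per-link data on a non-MLD station (illustrative only) */
static bool sta_supports_ht_sketch(struct sta_info *sta)
{
        /* for non-MLD stations both names refer to the same object */
        WARN_ON_ONCE(sta->link[0] != &sta->deflink);

        /* before this series: sta->sta.ht_cap.ht_supported */
        return sta->sta.deflink.ht_cap.ht_supported;
}
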
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 1deb3d874a4b..91878ed5ec46 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -467,7 +467,7 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
sta->ampdu_mlme.addba_req_num[tid]++;
spin_unlock_bh(&sta->lock);
- if (sta->sta.he_cap.has_he) {
+ if (sta->sta.deflink.he_cap.has_he) {
buf_size = local->hw.max_tx_aggregation_subframes;
} else {
/*
@@ -594,7 +594,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
"Requested to start BA session on reserved tid=%d", tid))
return -EINVAL;
- if (!pubsta->ht_cap.ht_supported &&
+ if (!pubsta->deflink.ht_cap.ht_supported &&
sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
return -EINVAL;
@@ -647,7 +647,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
* is set when we receive a bss info from a probe response or a beacon.
*/
if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- !sta->sta.ht_cap.ht_supported) {
+ !sta->sta.deflink.ht_cap.ht_supported) {
ht_dbg(sdata,
"BA request denied - IBSS STA %pM does not advertise HT support\n",
pubsta->addr);
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index 2619e12c8bda..4bab1683652d 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -647,8 +647,8 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
struct sta_info *sta = container_of(pubsta, struct sta_info,
sta);
struct ieee80211_rx_status stat;
- struct ieee80211_tx_rate *tx_rate = &sta->tx_stats.last_rate;
- struct rate_info *ri = &sta->tx_stats.last_rate_info;
+ struct ieee80211_tx_rate *tx_rate = &sta->deflink.tx_stats.last_rate;
+ struct rate_info *ri = &sta->deflink.tx_stats.last_rate_info;
u32 duration, overhead;
u8 agg_shift;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ba752539d1d9..f1d211e61e49 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -570,7 +570,8 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
if (pairwise)
key = key_mtx_dereference(local, sta->ptk[key_idx]);
else
- key = key_mtx_dereference(local, sta->gtk[key_idx]);
+ key = key_mtx_dereference(local,
+ sta->deflink.gtk[key_idx]);
} else
key = key_mtx_dereference(local, sdata->keys[key_idx]);
@@ -620,7 +621,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
else if (!pairwise &&
key_idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
NUM_DEFAULT_BEACON_KEYS)
- key = rcu_dereference(sta->gtk[key_idx]);
+ key = rcu_dereference(sta->deflink.gtk[key_idx]);
} else
key = rcu_dereference(sdata->keys[key_idx]);
@@ -1728,9 +1729,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
sta->listen_interval = params->listen_interval;
if (params->sta_modify_mask & STATION_PARAM_APPLY_STA_TXPOWER) {
- sta->sta.txpwr.type = params->txpwr.type;
+ sta->sta.deflink.txpwr.type = params->txpwr.type;
if (params->txpwr.type == NL80211_TX_POWER_LIMITED)
- sta->sta.txpwr.power = params->txpwr.power;
+ sta->sta.deflink.txpwr.power = params->txpwr.power;
ret = drv_sta_set_txpwr(local, sdata, sta);
if (ret)
return ret;
@@ -1740,7 +1741,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
sband, params->supported_rates,
params->supported_rates_len,
- &sta->sta.supp_rates[sband->band]);
+ &sta->sta.deflink.supp_rates[sband->band]);
}
if (params->ht_capa)
@@ -3306,13 +3307,14 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
+ if (!sdata->u.ap.next_beacon)
+ return -EINVAL;
+
err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
NULL, NULL);
- if (sdata->u.ap.next_beacon) {
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
- }
+ kfree(sdata->u.ap.next_beacon->mbssid_ies);
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
if (err < 0)
return err;
@@ -4314,13 +4316,14 @@ ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP: {
int ret;
+ if (!sdata->u.ap.next_beacon)
+ return -EINVAL;
+
ret = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
NULL, NULL);
- if (sdata->u.ap.next_beacon) {
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
- }
+ kfree(sdata->u.ap.next_beacon->mbssid_ies);
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
if (ret < 0)
return ret;
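
The two cfg.c hunks fix the same pattern twice: the old code passed sdata->u.ap.next_beacon to ieee80211_assign_beacon() and only afterwards checked it for NULL before freeing it; now a missing next_beacon bails out with -EINVAL up front, and the pointer is freed unconditionally once it has been used. The general shape of the corrected flow, with made-up types and helpers:

/* sketch of the corrected shape, not the mac80211 code itself */
static int apply_next_beacon_sketch(struct demo_ap *ap)
{
        int err;

        if (!ap->next_beacon)           /* validate before the first use */
                return -EINVAL;

        err = demo_assign_beacon(ap, ap->next_beacon);

        /* the pointer was known to be valid, so the free is unconditional */
        demo_free_beacon(ap->next_beacon);
        ap->next_beacon = NULL;

        return err < 0 ? err : 0;
}
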
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e26d42de14ec..e3452445b363 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -199,7 +199,7 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta)
switch (width) {
case IEEE80211_STA_RX_BW_20:
- if (sta->sta.ht_cap.ht_supported)
+ if (sta->sta.deflink.ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20;
else
return NL80211_CHAN_WIDTH_20_NOHT;
@@ -375,15 +375,15 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
new_sta_bw = ieee80211_sta_cur_vht_bw(sta);
/* nothing change */
- if (new_sta_bw == sta->sta.bandwidth)
+ if (new_sta_bw == sta->sta.deflink.bandwidth)
continue;
/* vif changed to narrow BW and narrow BW for station wasn't
* requested or vise versa */
- if ((new_sta_bw < sta->sta.bandwidth) == !narrowed)
+ if ((new_sta_bw < sta->sta.deflink.bandwidth) == !narrowed)
continue;
- sta->sta.bandwidth = new_sta_bw;
+ sta->sta.deflink.bandwidth = new_sta_bw;
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_BW_CHANGED);
}
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index f4c9a92f50f9..1fe43b264d75 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -504,6 +504,7 @@ static const char *hw_flag_names[] = {
FLAG(SUPPORTS_TX_ENCAP_OFFLOAD),
FLAG(SUPPORTS_RX_DECAP_OFFLOAD),
FLAG(SUPPORTS_CONC_MON_RX_DECAP),
+ FLAG(DETECTS_COLOR_COLLISION),
#undef FLAG
};
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 88d9cc945a21..182094be9001 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -447,7 +447,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
int i;
ssize_t bufsz = 512;
struct sta_info *sta = file->private_data;
- struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
+ struct ieee80211_sta_ht_cap *htc = &sta->sta.deflink.ht_cap;
ssize_t ret;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -531,7 +531,7 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
{
char *buf, *p;
struct sta_info *sta = file->private_data;
- struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap;
+ struct ieee80211_sta_vht_cap *vhtc = &sta->sta.deflink.vht_cap;
ssize_t ret;
ssize_t bufsz = 512;
@@ -646,7 +646,7 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
char *buf, *p;
size_t buf_sz = PAGE_SIZE;
struct sta_info *sta = file->private_data;
- struct ieee80211_sta_he_cap *hec = &sta->sta.he_cap;
+ struct ieee80211_sta_he_cap *hec = &sta->sta.deflink.he_cap;
struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp;
u8 ppe_size;
u8 *cap;
@@ -1052,9 +1052,9 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(vht_capa);
DEBUGFS_ADD(he_capa);
- DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
- DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
- DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered);
+ DEBUGFS_ADD_COUNTER(rx_duplicates, deflink.rx_stats.num_duplicates);
+ DEBUGFS_ADD_COUNTER(rx_fragments, deflink.rx_stats.fragments);
+ DEBUGFS_ADD_COUNTER(tx_filtered, deflink.status_stats.filtered);
if (local->ops->wake_tx_queue) {
DEBUGFS_ADD(aqm);
diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c
index 364ad0ef7692..96c9486bf2fe 100644
--- a/net/mac80211/eht.c
+++ b/net/mac80211/eht.c
@@ -14,7 +14,7 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
u8 eht_cap_len, struct sta_info *sta)
{
- struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.eht_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.deflink.eht_cap;
struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
u8 eht_ppe_size = 0;
u8 mcs_nss_size;
@@ -71,6 +71,6 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
eht_cap->has_eht = true;
- sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
- sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+ sta->deflink.cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
+ sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta);
}
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index b2253df54413..31cd3c1ac07f 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -114,7 +114,7 @@ static void ieee80211_get_stats(struct net_device *dev,
sta_set_sinfo(sta, &sinfo, false);
i = 0;
- ADD_STA_STATS(sta);
+ ADD_STA_STATS(sta->link[0]);
data[i++] = sta->sta_state;
@@ -140,7 +140,7 @@ static void ieee80211_get_stats(struct net_device *dev,
memset(&sinfo, 0, sizeof(sinfo));
sta_set_sinfo(sta, &sinfo, false);
i = 0;
- ADD_STA_STATS(sta);
+ ADD_STA_STATS(sta->link[0]);
}
}
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index c05af7018f79..1a61f7552edd 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -49,7 +49,7 @@ ieee80211_update_from_he_6ghz_capa(const struct ieee80211_he_6ghz_capa *he_6ghz_
break;
}
- sta->sta.he_6ghz_capa = *he_6ghz_capa;
+ sta->sta.deflink.he_6ghz_capa = *he_6ghz_capa;
}
static void ieee80211_he_mcs_disable(__le16 *he_mcs)
@@ -110,7 +110,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
struct sta_info *sta)
{
- struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap;
struct ieee80211_sta_he_cap own_he_cap;
struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
u8 he_ppe_size;
@@ -153,8 +153,8 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
he_cap->has_he = true;
- sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
- sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+ sta->deflink.cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
+ sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta);
if (sband->band == NL80211_BAND_6GHZ && he_6ghz_capa)
ieee80211_update_from_he_6ghz_capa(he_6ghz_capa, sta);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 2eb7641f5556..171bd16b13f3 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -243,9 +243,9 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839;
apply:
- changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
+ changed = memcmp(&sta->sta.deflink.ht_cap, &ht_cap, sizeof(ht_cap));
- memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
+ memcpy(&sta->sta.deflink.ht_cap, &ht_cap, sizeof(ht_cap));
switch (sdata->vif.bss_conf.chandef.width) {
default:
@@ -264,9 +264,9 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
break;
}
- sta->sta.bandwidth = bw;
+ sta->sta.deflink.bandwidth = bw;
- sta->cur_max_bandwidth =
+ sta->deflink.cur_max_bandwidth =
ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 0416c4d22292..14c04fd48b7a 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -637,7 +637,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
- sta->sta.supp_rates[band] = supp_rates |
+ sta->sta.deflink.supp_rates[band] = supp_rates |
ieee80211_mandatory_rates(sband, scan_width);
return ieee80211_ibss_finish_sta(sta);
@@ -1005,7 +1005,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
if (sta) {
u32 prev_rates;
- prev_rates = sta->sta.supp_rates[band];
+ prev_rates = sta->sta.deflink.supp_rates[band];
/* make sure mandatory rates are always added */
scan_width = NL80211_BSS_CHAN_WIDTH_20;
if (rx_status->bw == RATE_INFO_BW_5)
@@ -1013,13 +1013,13 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
else if (rx_status->bw == RATE_INFO_BW_10)
scan_width = NL80211_BSS_CHAN_WIDTH_10;
- sta->sta.supp_rates[band] = supp_rates |
+ sta->sta.deflink.supp_rates[band] = supp_rates |
ieee80211_mandatory_rates(sband, scan_width);
- if (sta->sta.supp_rates[band] != prev_rates) {
+ if (sta->sta.deflink.supp_rates[band] != prev_rates) {
ibss_dbg(sdata,
"updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
sta->sta.addr, prev_rates,
- sta->sta.supp_rates[band]);
+ sta->sta.deflink.supp_rates[band]);
rates_updated = true;
}
} else {
@@ -1043,7 +1043,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
/* we both use HT */
struct ieee80211_ht_cap htcap_ie;
struct cfg80211_chan_def chandef;
- enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
+ enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth;
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(elems->ht_operation, &chandef);
@@ -1058,7 +1058,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_40) {
/* we both use VHT */
struct ieee80211_vht_cap cap_ie;
- struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
+ struct ieee80211_sta_vht_cap cap = sta->sta.deflink.vht_cap;
u32 vht_cap_info =
le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
@@ -1069,11 +1069,11 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
&cap_ie, sta);
- if (memcmp(&cap, &sta->sta.vht_cap, sizeof(cap)))
+ if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
rates_updated |= true;
}
- if (bw != sta->sta.bandwidth)
+ if (bw != sta->sta.deflink.bandwidth)
rates_updated |= true;
if (!cfg80211_chandef_compatible(&sdata->u.ibss.chandef,
@@ -1083,12 +1083,12 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
if (sta && rates_updated) {
u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
- u8 rx_nss = sta->sta.rx_nss;
+ u8 rx_nss = sta->sta.deflink.rx_nss;
/* Force rx_nss recalculation */
- sta->sta.rx_nss = 0;
+ sta->sta.deflink.rx_nss = 0;
rate_control_rate_init(sta);
- if (sta->sta.rx_nss != rx_nss)
+ if (sta->sta.deflink.rx_nss != rx_nss)
changed |= IEEE80211_RC_NSS_CHANGED;
drv_sta_rc_update(local, sdata, &sta->sta, changed);
@@ -1235,7 +1235,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
- sta->sta.supp_rates[band] = supp_rates |
+ sta->sta.deflink.supp_rates[band] = supp_rates |
ieee80211_mandatory_rates(sband, scan_width);
spin_lock(&ifibss->incomplete_lock);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index f695fc80088b..0fcf8aebedc4 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -476,7 +476,7 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
!(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX))
_ieee80211_set_tx_key(new, true);
} else {
- rcu_assign_pointer(sta->gtk[idx], new);
+ rcu_assign_pointer(sta->deflink.gtk[idx], new);
}
/* Only needed for transition from no key -> key.
* Still triggers unnecessary when using Extended Key ID
@@ -826,7 +826,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
(old_key && old_key->conf.cipher != key->conf.cipher))
goto out;
} else if (sta) {
- old_key = key_mtx_dereference(sdata->local, sta->gtk[idx]);
+ old_key = key_mtx_dereference(sdata->local,
+ sta->deflink.gtk[idx]);
} else {
old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
}
@@ -1076,8 +1077,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
int i;
mutex_lock(&local->key_mtx);
- for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
- key = key_mtx_dereference(local, sta->gtk[i]);
+ for (i = 0; i < ARRAY_SIZE(sta->deflink.gtk); i++) {
+ key = key_mtx_dereference(local, sta->deflink.gtk[i]);
if (!key)
continue;
ieee80211_key_replace(key->sdata, key->sta,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 44a6fdb6efbd..58ebdcd69d05 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -310,7 +310,7 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
LINK_FAIL_THRESH)
mesh_plink_broken(sta);
- sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
+ sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo);
ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
cfg80211_calculate_bitrate(&rinfo));
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a829470dd59e..42ba7424589e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -61,8 +61,8 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
return rssi_threshold == 0 ||
(sta &&
- (s8)-ewma_signal_read(&sta->rx_stats_avg.signal) >
- rssi_threshold);
+ (s8)-ewma_signal_read(&sta->deflink.rx_stats_avg.signal) >
+ rssi_threshold);
}
/**
@@ -125,7 +125,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
continue;
short_slot = false;
- if (erp_rates & sta->sta.supp_rates[sband->band])
+ if (erp_rates & sta->sta.deflink.supp_rates[sband->band])
short_slot = true;
else
break;
@@ -175,10 +175,10 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
sta->mesh->plink_state != NL80211_PLINK_ESTAB)
continue;
- if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
+ if (sta->sta.deflink.bandwidth > IEEE80211_STA_RX_BW_20)
continue;
- if (!sta->sta.ht_cap.ht_supported) {
+ if (!sta->sta.deflink.ht_cap.ht_supported) {
mpl_dbg(sdata, "nonHT sta (%pM) is present\n",
sta->sta.addr);
non_ht_sta = true;
@@ -415,7 +415,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
u32 rates, basic_rates = 0, changed = 0;
- enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
+ enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth;
sband = ieee80211_get_sband(sdata);
if (!sband)
@@ -425,7 +425,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
&basic_rates);
spin_lock_bh(&sta->mesh->plink_lock);
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
/* rates and capabilities don't change during peering */
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
@@ -433,9 +433,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
goto out;
sta->mesh->processed_beacon = true;
- if (sta->sta.supp_rates[sband->band] != rates)
+ if (sta->sta.deflink.supp_rates[sband->band] != rates)
changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
- sta->sta.supp_rates[sband->band] = rates;
+ sta->sta.deflink.supp_rates[sband->band] = rates;
if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem, sta))
@@ -449,16 +449,16 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
elems->he_6ghz_capa,
sta);
- if (bw != sta->sta.bandwidth)
+ if (bw != sta->sta.deflink.bandwidth)
changed |= IEEE80211_RC_BW_CHANGED;
/* HT peer is operating 20MHz-only */
if (elems->ht_operation &&
!(elems->ht_operation->ht_param &
IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
- if (sta->sta.bandwidth != IEEE80211_STA_RX_BW_20)
+ if (sta->sta.deflink.bandwidth != IEEE80211_STA_RX_BW_20)
changed |= IEEE80211_RC_BW_CHANGED;
- sta->sta.bandwidth = IEEE80211_STA_RX_BW_20;
+ sta->sta.deflink.bandwidth = IEEE80211_STA_RX_BW_20;
}
if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1b30c724ca8d..b857915881e0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3342,7 +3342,7 @@ static bool ieee80211_twt_req_supported(const struct sta_info *sta,
if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT))
return false;
- return sta->sta.he_cap.he_cap_elem.mac_cap_info[0] &
+ return sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_TWT_RES;
}
@@ -3369,7 +3369,7 @@ static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata,
ieee80211_vif_type_p2p(&sdata->vif));
return bss_conf->he_support &&
- (sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+ (sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_BCAST_TWT) &&
own_he_cap &&
(own_he_cap->he_cap_elem.mac_cap_info[2] &
@@ -3587,7 +3587,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
elems->he_6ghz_capa,
sta);
- bss_conf->he_support = sta->sta.he_cap.has_he;
+ bss_conf->he_support = sta->sta.deflink.he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
(elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) &&
wiphy_ext_feature_isset(local->hw.wiphy,
@@ -3607,7 +3607,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
elems->eht_cap_len,
sta);
- bss_conf->eht_support = sta->sta.eht_cap.has_eht;
+ bss_conf->eht_support = sta->sta.deflink.eht_cap.has_eht;
} else {
bss_conf->eht_support = false;
}
@@ -3678,7 +3678,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- sta->sta.rx_nss = nss;
+ sta->sta.deflink.rx_nss = nss;
}
rate_control_rate_init(sta);
@@ -4836,9 +4836,9 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
if (!sta)
return;
- timeout = sta->status_stats.last_ack;
- if (time_before(sta->status_stats.last_ack, sta->rx_stats.last_rx))
- timeout = sta->rx_stats.last_rx;
+ timeout = sta->deflink.status_stats.last_ack;
+ if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx))
+ timeout = sta->deflink.rx_stats.last_rx;
timeout += IEEE80211_CONNECTION_IDLE_TIME;
/* If timeout is after now, then update timer to fire at
@@ -5638,7 +5638,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
}
if (rates)
- new_sta->sta.supp_rates[cbss->channel->band] = rates;
+ new_sta->sta.deflink.supp_rates[cbss->channel->band] = rates;
else
sdata_info(sdata,
"No rates found, keeping mandatory only\n");
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 7c1a735b9eee..f97cb4c453d3 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -74,7 +74,7 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
/* Add only mandatory rates for now */
sband = local->hw.wiphy->bands[band];
- sta->sta.supp_rates[band] =
+ sta->sta.deflink.supp_rates[band] =
ieee80211_mandatory_rates(sband, scan_width);
spin_lock(&ifocb->incomplete_lock);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 8c6416129d5b..ae9700e0a1a5 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -371,7 +371,7 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
WARN_ONCE(i == sband->n_bitrates,
"no supported rates for sta %pM (0x%x, band %d) in rate_mask 0x%x with flags 0x%x\n",
sta ? sta->addr : NULL,
- sta ? sta->supp_rates[sband->band] : -1,
+ sta ? sta->deflink.supp_rates[sband->band] : -1,
sband->band,
rate_mask, rate_flags);
@@ -781,11 +781,11 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
u16 sta_vht_mask[NL80211_VHT_NSS_MAX];
/* Filter out rates that the STA does not support */
- *mask &= sta->supp_rates[sband->band];
+ *mask &= sta->deflink.supp_rates[sband->band];
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
+ mcs_mask[i] &= sta->deflink.ht_cap.mcs.rx_mask[i];
- sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
+ sta_vht_cap = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask);
for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
vht_mask[i] &= sta_vht_mask[i];
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 9c6ace858107..7b1f5c045e06 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -362,6 +362,9 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
group = MINSTREL_CCK_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) {
+ if (!(mi->supported[group] & BIT(idx)))
+ continue;
+
if (rate->idx != mp->cck_rates[idx])
continue;
@@ -603,7 +606,7 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
int tmp_max_streams, group, tmp_idx, tmp_prob;
int tmp_tp = 0;
- if (!mi->sta->ht_cap.ht_supported)
+ if (!mi->sta->deflink.ht_cap.ht_supported)
return;
group = MI_RATE_GROUP(mi->max_tp_rate[0]);
@@ -993,7 +996,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
u16 tmp_legacy_tp_rate[MAX_THR_RATES], tmp_max_prob_rate;
u16 index;
- bool ht_supported = mi->sta->ht_cap.ht_supported;
+ bool ht_supported = mi->sta->deflink.ht_cap.ht_supported;
if (mi->ampdu_packets > 0) {
if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN))
@@ -1416,7 +1419,7 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
* the limit here to avoid the complexity of having to de-aggregate
* packets in the queue.
*/
- if (!mi->sta->vht_cap.vht_supported)
+ if (!mi->sta->deflink.vht_cap.vht_supported)
return IEEE80211_MAX_MPDU_LEN_HT_BA;
/* unlimited */
@@ -1533,7 +1536,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (sband->band != NL80211_BAND_2GHZ)
return;
- if (sta->ht_cap.ht_supported &&
+ if (sta->deflink.ht_cap.ht_supported &&
!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
return;
@@ -1556,7 +1559,7 @@ minstrel_ht_update_ofdm(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
const u8 *rates;
int i;
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
return;
rates = mp->ofdm_rates[sband->band];
@@ -1576,9 +1579,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
{
struct minstrel_priv *mp = priv;
struct minstrel_ht_sta *mi = priv_sta;
- struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
- u16 ht_cap = sta->ht_cap.cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
+ u16 ht_cap = sta->deflink.ht_cap.cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
const struct ieee80211_rate *ctl_rate;
bool ldpc, erp;
int use_vht;
@@ -1650,7 +1653,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
}
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
- sta->bandwidth < IEEE80211_STA_RX_BW_40)
+ sta->deflink.bandwidth < IEEE80211_STA_RX_BW_40)
continue;
nss = minstrel_mcs_groups[i].streams;
@@ -1677,7 +1680,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
continue;
if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) {
- if (sta->bandwidth < IEEE80211_STA_RX_BW_80 ||
+ if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_80 ||
((gflags & IEEE80211_TX_RC_SHORT_GI) &&
!(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) {
continue;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index beb6b92eb780..959a36fd658b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -221,7 +221,7 @@ static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
skb_queue_tail(&sdata->skb_queue, skb);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
if (sta)
- sta->rx_stats.packets++;
+ sta->deflink.rx_stats.packets++;
}
static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
@@ -1465,7 +1465,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
- rx->sta->rx_stats.num_duplicates++;
+ rx->sta->deflink.rx_stats.num_duplicates++;
return RX_DROP_UNUSABLE;
} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
@@ -1761,46 +1761,47 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
NL80211_IFTYPE_ADHOC);
if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1))
- sta->rx_stats.last_rate =
+ sta->deflink.rx_stats.last_rate =
sta_stats_encode_rate(status);
}
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx when if they are found to
* match the current local configuration when processed.
*/
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control))
- sta->rx_stats.last_rate = sta_stats_encode_rate(status);
+ sta->deflink.rx_stats.last_rate = sta_stats_encode_rate(status);
}
- sta->rx_stats.fragments++;
+ sta->deflink.rx_stats.fragments++;
- u64_stats_update_begin(&rx->sta->rx_stats.syncp);
- sta->rx_stats.bytes += rx->skb->len;
- u64_stats_update_end(&rx->sta->rx_stats.syncp);
+ u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp);
+ sta->deflink.rx_stats.bytes += rx->skb->len;
+ u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp);
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
- sta->rx_stats.last_signal = status->signal;
- ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
+ sta->deflink.rx_stats.last_signal = status->signal;
+ ewma_signal_add(&sta->deflink.rx_stats_avg.signal,
+ -status->signal);
}
if (status->chains) {
- sta->rx_stats.chains = status->chains;
+ sta->deflink.rx_stats.chains = status->chains;
for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
int signal = status->chain_signal[i];
if (!(status->chains & BIT(i)))
continue;
- sta->rx_stats.chain_signal_last[i] = signal;
- ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
+ sta->deflink.rx_stats.chain_signal_last[i] = signal;
+ ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i],
-signal);
}
}
@@ -1861,7 +1862,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Update counter and free packet here to avoid
* counting this as a dropped packed.
*/
- sta->rx_stats.packets++;
+ sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -1893,11 +1894,11 @@ ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
}
if (rx->sta)
- key = rcu_dereference(rx->sta->gtk[idx]);
+ key = rcu_dereference(rx->sta->deflink.gtk[idx]);
if (!key)
key = rcu_dereference(sdata->keys[idx]);
if (!key && rx->sta)
- key = rcu_dereference(rx->sta->gtk[idx2]);
+ key = rcu_dereference(rx->sta->deflink.gtk[idx2]);
if (!key)
key = rcu_dereference(sdata->keys[idx2]);
@@ -2012,7 +2013,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
test_sta_flag(rx->sta, WLAN_STA_MFP))
return RX_DROP_MONITOR;
- rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
+ rx->key = rcu_dereference(rx->sta->deflink.gtk[mmie_keyidx]);
}
if (!rx->key)
rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
@@ -2035,7 +2036,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
} else {
if (rx->sta) {
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
- key = rcu_dereference(rx->sta->gtk[i]);
+ key = rcu_dereference(rx->sta->deflink.gtk[i]);
if (key)
break;
}
@@ -2072,7 +2073,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
/* check per-station GTK first, if multicast packet */
if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
- rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
+ rx->key = rcu_dereference(rx->sta->deflink.gtk[keyidx]);
/* if not found, try default key */
if (!rx->key) {
@@ -2398,7 +2399,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
out:
ieee80211_led_rx(rx->local);
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
return RX_CONTINUE;
}
@@ -2645,9 +2646,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
* for non-QoS-data frames. Here we know it's a data
* frame, so count MSDUs.
*/
- u64_stats_update_begin(&rx->sta->rx_stats.syncp);
- rx->sta->rx_stats.msdu[rx->seqno_idx]++;
- u64_stats_update_end(&rx->sta->rx_stats.syncp);
+ u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp);
+ rx->sta->deflink.rx_stats.msdu[rx->seqno_idx]++;
+ u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp);
}
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -3178,6 +3179,49 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
ieee80211_tx_skb(sdata, skb);
}
+static void
+ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+ const struct element *ie;
+ size_t baselen;
+
+ if (!wiphy_ext_feature_isset(rx->local->hw.wiphy,
+ NL80211_EXT_FEATURE_BSS_COLOR))
+ return;
+
+ if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
+ return;
+
+ if (rx->sdata->vif.csa_active)
+ return;
+
+ baselen = mgmt->u.beacon.variable - rx->skb->data;
+ if (baselen > rx->skb->len)
+ return;
+
+ ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION,
+ mgmt->u.beacon.variable,
+ rx->skb->len - baselen);
+ if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) &&
+ ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) {
+ struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf;
+ const struct ieee80211_he_operation *he_oper;
+ u8 color;
+
+ he_oper = (void *)(ie->data + 1);
+ if (le32_get_bits(he_oper->he_oper_params,
+ IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED))
+ return;
+
+ color = le32_get_bits(he_oper->he_oper_params,
+ IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
+ if (color == bss_conf->he_bss_color.color)
+ ieeee80211_obss_color_collision_notify(&rx->sdata->vif,
+ BIT_ULL(color));
+ }
+}
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
{
@@ -3203,6 +3247,9 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
!(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
int sig = 0;
+ /* sw bss color collision detection */
+ ieee80211_rx_check_bss_color_collision(rx);
+
if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
!(status->flag & RX_FLAG_NO_SIGNAL_VAL))
sig = status->signal;
@@ -3296,7 +3343,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
switch (mgmt->u.action.category) {
case WLAN_CATEGORY_HT:
/* reject HT action frames from stations not supporting HT */
- if (!rx->sta->sta.ht_cap.ht_supported)
+ if (!rx->sta->sta.deflink.ht_cap.ht_supported)
goto invalid;
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
@@ -3360,7 +3407,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
struct sta_opmode_info sta_opmode = {};
/* If it doesn't support 40 MHz it can't change ... */
- if (!(rx->sta->sta.ht_cap.cap &
+ if (!(rx->sta->sta.deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
goto handled;
@@ -3370,13 +3417,13 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
/* set cur_max_bandwidth and recalc sta bw */
- rx->sta->cur_max_bandwidth = max_bw;
+ rx->sta->deflink.cur_max_bandwidth = max_bw;
new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
- if (rx->sta->sta.bandwidth == new_bw)
+ if (rx->sta->sta.deflink.bandwidth == new_bw)
goto handled;
- rx->sta->sta.bandwidth = new_bw;
+ rx->sta->sta.deflink.bandwidth = new_bw;
sband = rx->local->hw.wiphy->bands[status->band];
sta_opmode.bw =
ieee80211_sta_rx_bw_to_chan_width(rx->sta);
@@ -3573,7 +3620,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
handled:
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
@@ -3607,7 +3654,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
ieee80211_rx_status_to_khz(status), sig,
rx->skb->data, rx->skb->len, 0)) {
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -3645,7 +3692,7 @@ ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
handled:
if (rx->sta)
- rx->sta->rx_stats.packets++;
+ rx->sta->deflink.rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -3865,7 +3912,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
case RX_DROP_MONITOR:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_stats.dropped++;
+ rx->sta->deflink.rx_stats.dropped++;
fallthrough;
case RX_CONTINUE: {
struct ieee80211_rate *rate = NULL;
@@ -3884,7 +3931,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
case RX_DROP_UNUSABLE:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_stats.dropped++;
+ rx->sta->deflink.rx_stats.dropped++;
dev_kfree_skb(rx->skb);
break;
case RX_QUEUED:
@@ -4436,15 +4483,15 @@ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
void *sa = skb->data + ETH_ALEN;
void *da = skb->data;
- stats = &sta->rx_stats;
+ stats = &sta->deflink.rx_stats;
if (fast_rx->uses_rss)
- stats = this_cpu_ptr(sta->pcpu_rx_stats);
+ stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats);
/* statistics part of ieee80211_rx_h_sta_process() */
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
stats->last_signal = status->signal;
if (!fast_rx->uses_rss)
- ewma_signal_add(&sta->rx_stats_avg.signal,
+ ewma_signal_add(&sta->deflink.rx_stats_avg.signal,
-status->signal);
}
@@ -4460,7 +4507,7 @@ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
stats->chain_signal_last[i] = signal;
if (!fast_rx->uses_rss)
- ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
+ ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i],
-signal);
}
}
@@ -4536,7 +4583,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
} addrs __aligned(2);
- struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+ struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats;
/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
* to a common data structure; drivers can implement that per queue
@@ -4638,7 +4685,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
drop:
dev_kfree_skb(skb);
if (fast_rx->uses_rss)
- stats = this_cpu_ptr(sta->pcpu_rx_stats);
+ stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats);
stats->dropped++;
return true;
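
rx.c gains software BSS color collision detection: for beacons received while the wiphy advertises NL80211_EXT_FEATURE_BSS_COLOR, the driver does not set the new DETECTS_COLOR_COLLISION hardware flag, and no channel switch is in progress, mac80211 parses the HE Operation element itself and, if the announced BSS color matches our own and color use is not disabled, notifies the OBSS color collision machinery. The color is a 6-bit field inside the 32-bit HE operation parameters word; a sketch of the extraction, assuming the usual mask definitions from linux/ieee80211.h:

/* sketch: pull the BSS color out of an HE Operation parameters word */
#include <linux/bitfield.h>

static u8 he_oper_bss_color_sketch(__le32 he_oper_params)
{
        if (le32_get_bits(he_oper_params,
                          IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED))
                return 0;       /* color use disabled by the AP */

        return le32_get_bits(he_oper_params,
                             IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
}
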
diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c
index 4141bc80cdfd..8ca7d45d6daa 100644
--- a/net/mac80211/s1g.c
+++ b/net/mac80211/s1g.c
@@ -11,8 +11,8 @@
void ieee80211_s1g_sta_rate_init(struct sta_info *sta)
{
/* avoid indicating legacy bitrates for S1G STAs */
- sta->tx_stats.last_rate.flags |= IEEE80211_TX_RC_S1G_MCS;
- sta->rx_stats.last_rate =
+ sta->deflink.tx_stats.last_rate.flags |= IEEE80211_TX_RC_S1G_MCS;
+ sta->deflink.rx_stats.last_rate =
STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_S1G);
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 91fbb1ee5c38..e04a0905e941 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -287,7 +287,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
#ifdef CONFIG_MAC80211_MESH
kfree(sta->mesh);
#endif
- free_percpu(sta->pcpu_rx_stats);
+ free_percpu(sta->deflink.pcpu_rx_stats);
kfree(sta);
}
@@ -346,9 +346,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return NULL;
if (ieee80211_hw_check(hw, USES_RSS)) {
- sta->pcpu_rx_stats =
+ sta->deflink.pcpu_rx_stats =
alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
- if (!sta->pcpu_rx_stats)
+ if (!sta->deflink.pcpu_rx_stats)
goto free;
}
@@ -376,6 +376,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sta.max_rx_aggregation_subframes =
local->hw.max_rx_aggregation_subframes;
+ /* TODO link specific alloc and assignments for MLO Link STA */
+
+ /* For non MLO STA, link info can be accessed either via deflink
+ * or link[0]
+ */
+ sta->link[0] = &sta->deflink;
+ sta->sta.link[0] = &sta->sta.deflink;
+
/* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only.
* The Tx path starts to use a key as soon as the key slot ptk_idx
* references to is not NULL. To not use the initial Rx-only key
@@ -387,9 +395,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->local = local;
sta->sdata = sdata;
- sta->rx_stats.last_rx = jiffies;
+ sta->deflink.rx_stats.last_rx = jiffies;
- u64_stats_init(&sta->rx_stats.syncp);
+ u64_stats_init(&sta->deflink.rx_stats.syncp);
ieee80211_init_frag_cache(&sta->frags);
@@ -399,10 +407,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
sta->last_connected = ktime_get_seconds();
- ewma_signal_init(&sta->rx_stats_avg.signal);
- ewma_avg_signal_init(&sta->status_stats.avg_ack_signal);
- for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
- ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);
+ ewma_signal_init(&sta->deflink.rx_stats_avg.signal);
+ ewma_avg_signal_init(&sta->deflink.status_stats.avg_ack_signal);
+ for (i = 0; i < ARRAY_SIZE(sta->deflink.rx_stats_avg.chain_signal); i++)
+ ewma_signal_init(&sta->deflink.rx_stats_avg.chain_signal[i]);
if (local->ops->wake_tx_queue) {
void *txq_data;
@@ -472,7 +480,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (!(rate->flags & mandatory))
continue;
- sta->sta.supp_rates[i] |= BIT(r);
+ sta->sta.deflink.supp_rates[i] |= BIT(r);
}
}
@@ -524,7 +532,7 @@ free_txq:
if (sta->sta.txq[0])
kfree(to_txq_info(sta->sta.txq[0]));
free:
- free_percpu(sta->pcpu_rx_stats);
+ free_percpu(sta->deflink.pcpu_rx_stats);
#ifdef CONFIG_MAC80211_MESH
kfree(sta->mesh);
#endif
@@ -2087,16 +2095,16 @@ int sta_info_move_state(struct sta_info *sta,
u8 sta_info_tx_streams(struct sta_info *sta)
{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.deflink.ht_cap;
u8 rx_streams;
- if (!sta->sta.ht_cap.ht_supported)
+ if (!sta->sta.deflink.ht_cap.ht_supported)
return 1;
- if (sta->sta.vht_cap.vht_supported) {
+ if (sta->sta.deflink.vht_cap.vht_supported) {
int i;
u16 tx_mcs_map =
- le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map);
+ le16_to_cpu(sta->sta.deflink.vht_cap.vht_mcs.tx_mcs_map);
for (i = 7; i >= 0; i--)
if ((tx_mcs_map & (0x3 << (i * 2))) !=
@@ -2123,16 +2131,16 @@ u8 sta_info_tx_streams(struct sta_info *sta)
static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
- struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+ struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats;
int cpu;
- if (!sta->pcpu_rx_stats)
+ if (!sta->deflink.pcpu_rx_stats)
return stats;
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpustats;
- cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu);
if (time_after(cpustats->last_rx, stats->last_rx))
stats = cpustats;
@@ -2226,13 +2234,15 @@ static void sta_set_tidstats(struct sta_info *sta,
int cpu;
if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
- tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid);
+ tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats,
+ tid);
- if (sta->pcpu_rx_stats) {
+ if (sta->deflink.pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpurxs;
- cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats,
+ cpu);
tidstats->rx_msdu +=
sta_get_tidstats_msdu(cpurxs, tid);
}
@@ -2243,19 +2253,19 @@ static void sta_set_tidstats(struct sta_info *sta,
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
- tidstats->tx_msdu = sta->tx_stats.msdu[tid];
+ tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
- tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
+ tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
- tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
+ tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid];
}
if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) {
@@ -2326,26 +2336,27 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
sinfo->tx_bytes = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- sinfo->tx_bytes += sta->tx_stats.bytes[ac];
+ sinfo->tx_bytes += sta->deflink.tx_stats.bytes[ac];
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
sinfo->tx_packets = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- sinfo->tx_packets += sta->tx_stats.packets[ac];
+ sinfo->tx_packets += sta->deflink.tx_stats.packets[ac];
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
}
if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
- sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+ sinfo->rx_bytes += sta_get_stats_bytes(&sta->deflink.rx_stats);
- if (sta->pcpu_rx_stats) {
+ if (sta->deflink.pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpurxs;
- cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats,
+ cpu);
sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
}
}
@@ -2354,12 +2365,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
- sinfo->rx_packets = sta->rx_stats.packets;
- if (sta->pcpu_rx_stats) {
+ sinfo->rx_packets = sta->deflink.rx_stats.packets;
+ if (sta->deflink.pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpurxs;
- cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats,
+ cpu);
sinfo->rx_packets += cpurxs->packets;
}
}
@@ -2367,12 +2379,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
- sinfo->tx_retries = sta->status_stats.retry_count;
+ sinfo->tx_retries = sta->deflink.status_stats.retry_count;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
- sinfo->tx_failed = sta->status_stats.retry_failed;
+ sinfo->tx_failed = sta->deflink.status_stats.retry_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
@@ -2393,12 +2405,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
}
- sinfo->rx_dropped_misc = sta->rx_stats.dropped;
- if (sta->pcpu_rx_stats) {
+ sinfo->rx_dropped_misc = sta->deflink.rx_stats.dropped;
+ if (sta->deflink.pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpurxs;
- cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu);
sinfo->rx_dropped_misc += cpurxs->dropped;
}
}
@@ -2417,10 +2429,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
- if (!sta->pcpu_rx_stats &&
+ if (!sta->deflink.pcpu_rx_stats &&
!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
sinfo->signal_avg =
- -ewma_signal_read(&sta->rx_stats_avg.signal);
+ -ewma_signal_read(&sta->deflink.rx_stats_avg.signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
}
@@ -2433,7 +2445,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
- if (!sta->pcpu_rx_stats)
+ if (!sta->deflink.pcpu_rx_stats)
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
sinfo->chains = last_rxstats->chains;
@@ -2442,12 +2454,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->chain_signal[i] =
last_rxstats->chain_signal_last[i];
sinfo->chain_signal_avg[i] =
- -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
+ -ewma_signal_read(&sta->deflink.rx_stats_avg.chain_signal[i]);
}
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
- sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
+ sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate,
&sinfo->txrate);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
@@ -2529,16 +2541,16 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
- sta->status_stats.ack_signal_filled) {
- sinfo->ack_signal = sta->status_stats.last_ack_signal;
+ sta->deflink.status_stats.ack_signal_filled) {
+ sinfo->ack_signal = sta->deflink.status_stats.last_ack_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
- sta->status_stats.ack_signal_filled) {
+ sta->deflink.status_stats.ack_signal_filled) {
sinfo->avg_ack_signal =
-(s8)ewma_avg_signal_read(
- &sta->status_stats.avg_ack_signal);
+ &sta->deflink.status_stats.avg_ack_signal);
sinfo->filled |=
BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
@@ -2573,10 +2585,10 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
- if (!sta->status_stats.last_ack ||
- time_after(stats->last_rx, sta->status_stats.last_ack))
+ if (!sta->deflink.status_stats.last_ack ||
+ time_after(stats->last_rx, sta->deflink.status_stats.last_ack))
return stats->last_rx;
- return sta->status_stats.last_ack;
+ return sta->deflink.status_stats.last_ack;
}
static void sta_update_codel_params(struct sta_info *sta, u32 thr)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 379fd367197f..35c390bedfba 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -484,6 +484,86 @@ struct ieee80211_fragment_cache {
#define STA_SLOW_THRESHOLD 6000 /* 6 Mbps */
/**
+ * struct link_sta_info - Link STA information
+ * All link-specific STA information is stored here; a non-MLD STA has a
+ * single entry while an MLD STA has one entry per link.
+ * @addr: Link MAC address - can be the same as the MLD STA MAC address and is
+ * always the same for a non-MLD STA. Used as the key when searching for a
+ * link STA.
+ * @link_id: Link ID uniquely identifying the link STA. This is 0 for a
+ * non-MLD STA and is set to the corresponding vif link ID for an MLD STA.
+ * @sta: Points to the STA info
+ * @gtk: group keys negotiated with this station, if any
+ * @tx_stats: TX statistics
+ * @tx_stats.packets: # of packets transmitted
+ * @tx_stats.bytes: # of bytes in all packets transmitted
+ * @tx_stats.last_rate: last TX rate
+ * @tx_stats.msdu: # of transmitted MSDUs per TID
+ * @rx_stats: RX statistics
+ * @rx_stats_avg: averaged RX statistics
+ * @rx_stats_avg.signal: averaged signal
+ * @rx_stats_avg.chain_signal: averaged per-chain signal
+ * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs
+ * this (by advertising the USES_RSS hw flag)
+ * @status_stats: TX status statistics
+ * @status_stats.filtered: # of filtered frames
+ * @status_stats.retry_failed: # of frames that failed after retry
+ * @status_stats.retry_count: # of retries attempted
+ * @status_stats.lost_packets: # of lost packets
+ * @status_stats.last_pkt_time: timestamp of last ACKed packet
+ * @status_stats.msdu_retries: # of MSDU retries
+ * @status_stats.msdu_failed: # of failed MSDUs
+ * @status_stats.last_ack: last ack timestamp (jiffies)
+ * @status_stats.last_ack_signal: last ACK signal
+ * @status_stats.ack_signal_filled: last ACK signal validity
+ * @status_stats.avg_ack_signal: average ACK signal
+ * TODO Move other link params from sta_info as required for MLD operation
+ */
+struct link_sta_info {
+ u8 addr[ETH_ALEN];
+ u8 link_id;
+
+ /* TODO rhash head/node for finding link_sta based on addr */
+
+ struct sta_info *sta;
+ struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS +
+ NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS];
+ struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats;
+
+ /* Updated from RX path only, no locking requirements */
+ struct ieee80211_sta_rx_stats rx_stats;
+ struct {
+ struct ewma_signal signal;
+ struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS];
+ } rx_stats_avg;
+
+ /* Updated from TX status path only, no locking requirements */
+ struct {
+ unsigned long filtered;
+ unsigned long retry_failed, retry_count;
+ unsigned int lost_packets;
+ unsigned long last_pkt_time;
+ u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
+ u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
+ unsigned long last_ack;
+ s8 last_ack_signal;
+ bool ack_signal_filled;
+ struct ewma_avg_signal avg_ack_signal;
+ } status_stats;
+
+ /* Updated from TX path only, no locking requirements */
+ struct {
+ u64 packets[IEEE80211_NUM_ACS];
+ u64 bytes[IEEE80211_NUM_ACS];
+ struct ieee80211_tx_rate last_rate;
+ struct rate_info last_rate_info;
+ u64 msdu[IEEE80211_NUM_TIDS + 1];
+ } tx_stats;
+
+ enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
+};
+
+/**
* struct sta_info - STA information
*
* This structure collects information about a station that
@@ -498,7 +578,6 @@ struct ieee80211_fragment_cache {
* @sdata: virtual interface this station belongs to
* @ptk: peer keys negotiated with this station, if any
* @ptk_idx: last installed peer key index
- * @gtk: group keys negotiated with this station, if any
* @rate_ctrl: rate control algorithm reference
* @rate_ctrl_lock: spinlock used to protect rate control data
* (data inside the algorithm, so serializes calls there)
@@ -544,30 +623,19 @@ struct ieee80211_fragment_cache {
* @fast_rx: RX fastpath information
* @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
* the BSS one.
- * @tx_stats: TX statistics
- * @tx_stats.packets: # of packets transmitted
- * @tx_stats.bytes: # of bytes in all packets transmitted
- * @tx_stats.last_rate: last TX rate
- * @tx_stats.msdu: # of transmitted MSDUs per TID
- * @rx_stats: RX statistics
- * @rx_stats_avg: averaged RX statistics
- * @rx_stats_avg.signal: averaged signal
- * @rx_stats_avg.chain_signal: averaged per-chain signal
- * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs
- * this (by advertising the USES_RSS hw flag)
- * @status_stats: TX status statistics
- * @status_stats.filtered: # of filtered frames
- * @status_stats.retry_failed: # of frames that failed after retry
- * @status_stats.retry_count: # of retries attempted
- * @status_stats.lost_packets: # of lost packets
- * @status_stats.last_pkt_time: timestamp of last ACKed packet
- * @status_stats.msdu_retries: # of MSDU retries
- * @status_stats.msdu_failed: # of failed MSDUs
- * @status_stats.last_ack: last ack timestamp (jiffies)
- * @status_stats.last_ack_signal: last ACK signal
- * @status_stats.ack_signal_filled: last ACK signal validity
- * @status_stats.avg_ack_signal: average ACK signal
* @frags: fragment cache
+ * @multi_link_sta: Identifies whether this STA is an MLD STA or a regular STA
+ * @deflink: Default link STA information. For a non-MLO STA, all link-specific
+ * STA information is accessed through @deflink or through link[0], which
+ * points to @deflink. For an MLO STA, the first link STA added points to
+ * @deflink.
+ * @link: Array of link STA entries. For a non-MLO STA, all entries except
+ * link[0] are NULL by default and link information is accessed via
+ * @deflink or link[0]. For an MLO STA, the first link STA added has its
+ * link pointer set to @deflink; the remaining entries are allocated and
+ * assigned to link[link_id], where link_id is the ID assigned by the AP.
*/
struct sta_info {
/* General information, mostly static */
@@ -577,9 +645,6 @@ struct sta_info {
u8 addr[ETH_ALEN];
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS +
- NUM_DEFAULT_MGMT_KEYS +
- NUM_DEFAULT_BEACON_KEYS];
struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
u8 ptk_idx;
struct rate_control_ref *rate_ctrl;
@@ -589,7 +654,6 @@ struct sta_info {
struct ieee80211_fast_tx __rcu *fast_tx;
struct ieee80211_fast_rx __rcu *fast_rx;
- struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats;
#ifdef CONFIG_MAC80211_MESH
struct mesh_sta *mesh;
@@ -619,38 +683,9 @@ struct sta_info {
u64 assoc_at;
long last_connected;
- /* Updated from RX path only, no locking requirements */
- struct ieee80211_sta_rx_stats rx_stats;
- struct {
- struct ewma_signal signal;
- struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS];
- } rx_stats_avg;
-
/* Plus 1 for non-QoS frames */
__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
- /* Updated from TX status path only, no locking requirements */
- struct {
- unsigned long filtered;
- unsigned long retry_failed, retry_count;
- unsigned int lost_packets;
- unsigned long last_pkt_time;
- u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
- u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
- unsigned long last_ack;
- s8 last_ack_signal;
- bool ack_signal_filled;
- struct ewma_avg_signal avg_ack_signal;
- } status_stats;
-
- /* Updated from TX path only, no locking requirements */
- struct {
- u64 packets[IEEE80211_NUM_ACS];
- u64 bytes[IEEE80211_NUM_ACS];
- struct ieee80211_tx_rate last_rate;
- struct rate_info last_rate_info;
- u64 msdu[IEEE80211_NUM_TIDS + 1];
- } tx_stats;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
struct airtime_info airtime[IEEE80211_NUM_ACS];
@@ -664,8 +699,6 @@ struct sta_info {
struct dentry *debugfs_dir;
#endif
- enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
-
enum ieee80211_smps_mode known_smps_mode;
const struct ieee80211_cipher_scheme *cipher_scheme;
@@ -677,6 +710,10 @@ struct sta_info {
struct ieee80211_fragment_cache frags;
+ bool multi_link_sta;
+ struct link_sta_info deflink;
+ struct link_sta_info *link[MAX_STA_LINKS];
+
/* keep last! */
struct ieee80211_sta sta;
};
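
The kernel-doc above describes how per-link state moves out of struct sta_info into struct link_sta_info, with the embedded @deflink entry serving non-MLO stations and link[0] aliasing it. A minimal stand-alone sketch of that access pattern, using simplified stand-in types (the NUM_ACS and MAX_LINKS values here are illustrative, not the kernel constants):

#include <stdint.h>
#include <stdio.h>

#define NUM_ACS   4     /* stand-in for IEEE80211_NUM_ACS */
#define MAX_LINKS 15    /* stand-in for MAX_STA_LINKS, illustrative only */

/* Simplified stand-ins for struct link_sta_info / struct sta_info */
struct link_sta_info {
	struct {
		uint64_t packets[NUM_ACS];
		uint64_t bytes[NUM_ACS];
	} tx_stats;
};

struct sta_info {
	int multi_link_sta;                  /* MLD STA or not */
	struct link_sta_info deflink;        /* always valid; link[0] aliases it */
	struct link_sta_info *link[MAX_LINKS];
};

/* Sum TX bytes over all ACs of one link, as sta_set_sinfo() does per link */
static uint64_t link_tx_bytes(const struct link_sta_info *ls)
{
	uint64_t total = 0;

	for (int ac = 0; ac < NUM_ACS; ac++)
		total += ls->tx_stats.bytes[ac];
	return total;
}

int main(void)
{
	struct sta_info sta = { 0 };

	sta.link[0] = &sta.deflink;          /* non-MLO: link[0] points at deflink */
	sta.deflink.tx_stats.bytes[0] = 1500;
	sta.deflink.tx_stats.bytes[2] = 3000;

	printf("tx_bytes=%llu\n",
	       (unsigned long long)link_tx_bytes(sta.link[0]));
	return 0;
}
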
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index e81e8a5bb774..c563fa718d84 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -72,7 +72,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
info->flags |= IEEE80211_TX_INTFL_RETRANSMISSION;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
- sta->status_stats.filtered++;
+ sta->deflink.status_stats.filtered++;
/*
* Clear more-data bit on filtered frames, it might be set
@@ -776,7 +776,7 @@ static void ieee80211_lost_packet(struct sta_info *sta,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- sta->status_stats.lost_packets++;
+ sta->deflink.status_stats.lost_packets++;
if (sta->sta.tdls) {
pkt_time = STA_LOST_TDLS_PKT_TIME;
pkt_thr = STA_LOST_PKT_THRESHOLD;
@@ -789,13 +789,14 @@ static void ieee80211_lost_packet(struct sta_info *sta,
* mechanism.
* For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME
*/
- if (sta->status_stats.lost_packets < pkt_thr ||
- !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time))
+ if (sta->deflink.status_stats.lost_packets < pkt_thr ||
+ !time_after(jiffies, sta->deflink.status_stats.last_pkt_time + pkt_time))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
- sta->status_stats.lost_packets, GFP_ATOMIC);
- sta->status_stats.lost_packets = 0;
+ sta->deflink.status_stats.lost_packets,
+ GFP_ATOMIC);
+ sta->deflink.status_stats.lost_packets = 0;
}
static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
@@ -930,7 +931,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
(ieee80211_is_data(hdr->frame_control)) &&
(rates_idx != -1))
- sta->tx_stats.last_rate =
+ sta->deflink.tx_stats.last_rate =
info->status.rates[rates_idx];
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
@@ -976,9 +977,9 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
return;
} else if (ieee80211_is_data_present(fc)) {
if (!acked && !noack_success)
- sta->status_stats.msdu_failed[tid]++;
+ sta->deflink.status_stats.msdu_failed[tid]++;
- sta->status_stats.msdu_retries[tid] +=
+ sta->deflink.status_stats.msdu_retries[tid] +=
retry_count;
}
@@ -1111,7 +1112,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
sta = container_of(pubsta, struct sta_info, sta);
if (status->rate)
- sta->tx_stats.last_rate_info = *status->rate;
+ sta->deflink.tx_stats.last_rate_info = *status->rate;
}
if (skb && (tx_time_est =
@@ -1142,8 +1143,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (!acked && !noack_success)
- sta->status_stats.retry_failed++;
- sta->status_stats.retry_count += retry_count;
+ sta->deflink.status_stats.retry_failed++;
+ sta->deflink.status_stats.retry_count += retry_count;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -1152,13 +1153,13 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
acked, info->status.tx_time);
if (acked) {
- sta->status_stats.last_ack = jiffies;
+ sta->deflink.status_stats.last_ack = jiffies;
- if (sta->status_stats.lost_packets)
- sta->status_stats.lost_packets = 0;
+ if (sta->deflink.status_stats.lost_packets)
+ sta->deflink.status_stats.lost_packets = 0;
/* Track when last packet was ACKed */
- sta->status_stats.last_pkt_time = jiffies;
+ sta->deflink.status_stats.last_pkt_time = jiffies;
/* Reset connection monitor */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -1166,10 +1167,10 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
sdata->u.mgd.probe_send_count = 0;
if (ack_signal_valid) {
- sta->status_stats.last_ack_signal =
+ sta->deflink.status_stats.last_ack_signal =
(s8)info->status.ack_signal;
- sta->status_stats.ack_signal_filled = true;
- ewma_avg_signal_add(&sta->status_stats.avg_ack_signal,
+ sta->deflink.status_stats.ack_signal_filled = true;
+ ewma_avg_signal_add(&sta->deflink.status_stats.avg_ack_signal,
-info->status.ack_signal);
}
} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
@@ -1235,7 +1236,7 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
rate_control_tx_status(local, sband, &status);
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
- sta->tx_stats.last_rate = info->status.rates[0];
+ sta->deflink.tx_stats.last_rate = info->status.rates[0];
}
EXPORT_SYMBOL(ieee80211_tx_rate_update);
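
The comment in ieee80211_lost_packet() above explains that a loss event is only reported once the per-link lost_packets counter crosses a threshold and enough time has passed since the last ACKed packet, with separate TDLS and non-TDLS limits. A stand-alone sketch of that rate-limiting check, using made-up threshold values in place of the STA_LOST_* constants:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values; the kernel uses STA_LOST_PKT_THRESHOLD and friends */
#define PKT_THRESHOLD 50
#define PKT_TIME_MS   (5 * 1000)

struct loss_state {
	unsigned int lost_packets;
	unsigned long last_pkt_time_ms;   /* time of the last ACKed packet */
};

/* Returns true (and resets the counter) when a loss event should be reported */
static bool should_report_loss(struct loss_state *st, unsigned long now_ms)
{
	st->lost_packets++;

	/* Not enough consecutive losses, or an ACK was seen recently enough */
	if (st->lost_packets < PKT_THRESHOLD ||
	    now_ms <= st->last_pkt_time_ms + PKT_TIME_MS)
		return false;

	st->lost_packets = 0;   /* counter is reset after notifying */
	return true;
}

int main(void)
{
	struct loss_state st = { .lost_packets = 0, .last_pkt_time_ms = 0 };
	unsigned long now_ms = 10000;

	for (int i = 0; i < 60; i++)
		if (should_report_loss(&st, now_ms))
			printf("report loss at packet %d\n", i + 1);
	return 0;
}
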
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 137be9ec94af..4e2d22e47429 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -459,9 +459,9 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
- ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
+ ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) {
/* the peer caps are already intersected with our own */
- memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
+ memcpy(&ht_cap, &sta->sta.deflink.ht_cap, sizeof(ht_cap));
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
@@ -510,9 +510,9 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
- vht_cap.vht_supported && sta->sta.vht_cap.vht_supported) {
+ vht_cap.vht_supported && sta->sta.deflink.vht_cap.vht_supported) {
/* the peer caps are already intersected with our own */
- memcpy(&vht_cap, &sta->sta.vht_cap, sizeof(vht_cap));
+ memcpy(&vht_cap, &sta->sta.deflink.vht_cap, sizeof(vht_cap));
/* the AID is present only when VHT is implemented */
ieee80211_tdls_add_aid(sdata, skb);
@@ -603,13 +603,13 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
* if HT support is only added in TDLS, we need an HT-operation IE.
* add the IE as required by IEEE802.11-2012 9.23.3.2.
*/
- if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
+ if (!ap_sta->sta.deflink.ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) {
u16 prot = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
- ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
+ ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap,
&sdata->vif.bss_conf.chandef, prot,
true);
}
@@ -618,7 +618,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
/* only include VHT-operation if not on the 2.4GHz band */
if (sband->band != NL80211_BAND_2GHZ &&
- sta->sta.vht_cap.vht_supported) {
+ sta->sta.deflink.vht_cap.vht_supported) {
/*
* if both peers support WIDER_BW, we can expand the chandef to
* a wider compatible one, up to 80MHz
@@ -627,7 +627,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
- ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
+ ieee80211_ie_build_vht_oper(pos, &sta->sta.deflink.vht_cap,
&sta->tdls_chandef);
}
@@ -1269,8 +1269,8 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
- if (bw != sta->sta.bandwidth) {
- sta->sta.bandwidth = bw;
+ if (bw != sta->sta.deflink.bandwidth) {
+ sta->sta.deflink.bandwidth = bw;
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_BW_CHANGED);
/*
@@ -1296,7 +1296,7 @@ static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
- !sta->sta.ht_cap.ht_supported)
+ !sta->sta.deflink.ht_cap.ht_supported)
continue;
result = true;
break;
@@ -1321,7 +1321,7 @@ iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
return;
- tdls_ht = (sta && sta->sta.ht_cap.ht_supported) ||
+ tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) ||
iee80211_tdls_have_ht_peers(sdata);
opmode = sdata->vif.bss_conf.ht_operation_mode;
@@ -1900,7 +1900,7 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
}
/* peer should have known better */
- if (!sta->sta.ht_cap.ht_supported && elems->sec_chan_offs &&
+ if (!sta->sta.deflink.ht_cap.ht_supported && elems->sec_chan_offs &&
elems->sec_chan_offs->sec_chan_offs) {
tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
ret = -ENOTSUPP;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index d91498f77796..743adfbb9b15 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -860,8 +860,8 @@ TRACE_EVENT(drv_sta_set_txpwr,
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
- __entry->txpwr = sta->txpwr.power;
- __entry->type = sta->txpwr.type;
+ __entry->txpwr = sta->deflink.txpwr.power;
+ __entry->type = sta->deflink.txpwr.type;
),
TP_printk(
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b6b20f38de0e..13253eb39d09 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -768,9 +768,9 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
if (txrc.reported_rate.idx < 0) {
txrc.reported_rate = tx->rate;
if (tx->sta && ieee80211_is_tx_data(tx->skb))
- tx->sta->tx_stats.last_rate = txrc.reported_rate;
+ tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate;
} else if (tx->sta)
- tx->sta->tx_stats.last_rate = txrc.reported_rate;
+ tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate;
if (ratetbl)
return TX_CONTINUE;
@@ -837,7 +837,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
tx->sdata->sequence_number += 0x10;
if (tx->sta)
- tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
+ tx->sta->deflink.tx_stats.msdu[IEEE80211_NUM_TIDS]++;
return TX_CONTINUE;
}
@@ -851,7 +851,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
/* include per-STA, per-TID sequence counter */
tid = ieee80211_get_tid(hdr);
- tx->sta->tx_stats.msdu[tid]++;
+ tx->sta->deflink.tx_stats.msdu[tid]++;
hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
@@ -1004,10 +1004,10 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
skb_queue_walk(&tx->skbs, skb) {
ac = skb_get_queue_mapping(skb);
- tx->sta->tx_stats.bytes[ac] += skb->len;
+ tx->sta->deflink.tx_stats.bytes[ac] += skb->len;
}
if (ac >= 0)
- tx->sta->tx_stats.packets[ac]++;
+ tx->sta->deflink.tx_stats.packets[ac]++;
return TX_CONTINUE;
}
@@ -1159,7 +1159,7 @@ ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
return;
- if (!sta || !sta->sta.ht_cap.ht_supported ||
+ if (!sta || !sta->sta.deflink.ht_cap.ht_supported ||
!sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
skb->protocol == sdata->control_port_protocol)
return;
@@ -3462,18 +3462,18 @@ ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
}
if (skb_shinfo(skb)->gso_size)
- sta->tx_stats.msdu[tid] +=
+ sta->deflink.tx_stats.msdu[tid] +=
DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
else
- sta->tx_stats.msdu[tid]++;
+ sta->deflink.tx_stats.msdu[tid]++;
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
/* statistics normally done by ieee80211_tx_h_stats (but that
* has to consider fragmentation, so is more complex)
*/
- sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
+ sta->deflink.tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->deflink.tx_stats.packets[skb_get_queue_mapping(skb)]++;
if (pn_offs) {
u64 pn;
@@ -4481,8 +4481,8 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
dev_sw_netstats_tx_add(dev, 1, skb->len);
- sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
+ sta->deflink.tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->deflink.tx_stats.packets[skb_get_queue_mapping(skb)]++;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 8f16aa9c725d..ff26e0c4787b 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -118,14 +118,14 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_vht_cap *vht_cap_ie,
struct sta_info *sta)
{
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
bool have_80mhz;
memset(vht_cap, 0, sizeof(*vht_cap));
- if (!sta->sta.ht_cap.ht_supported)
+ if (!sta->sta.deflink.ht_cap.ht_supported)
return;
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
@@ -295,10 +295,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
break;
default:
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
if (!(vht_cap->vht_mcs.tx_highest &
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
@@ -310,10 +310,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
* above) between 160 and 80+80 yet.
*/
if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
}
- sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+ sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta);
switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
@@ -332,9 +332,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
/* FIXME: move this to some better location - parses HE/EHT now */
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
{
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
- struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
- struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.eht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.deflink.eht_cap;
u32 cap_width;
if (he_cap->has_he) {
@@ -369,7 +369,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
}
if (!vht_cap->vht_supported)
- return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ return sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 :
IEEE80211_STA_RX_BW_20;
@@ -392,14 +392,14 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
{
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
u32 cap_width;
if (!vht_cap->vht_supported) {
- if (!sta->sta.ht_cap.ht_supported)
+ if (!sta->sta.deflink.ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20_NOHT;
- return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ return sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
}
@@ -416,13 +416,13 @@ enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
enum nl80211_chan_width
ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta)
{
- enum ieee80211_sta_rx_bandwidth cur_bw = sta->sta.bandwidth;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+ enum ieee80211_sta_rx_bandwidth cur_bw = sta->sta.deflink.bandwidth;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap;
u32 cap_width;
switch (cur_bw) {
case IEEE80211_STA_RX_BW_20:
- if (!sta->sta.ht_cap.ht_supported)
+ if (!sta->sta.deflink.ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20_NOHT;
else
return NL80211_CHAN_WIDTH_20;
@@ -473,7 +473,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
bw = ieee80211_sta_cap_rx_bw(sta);
- bw = min(bw, sta->cur_max_bandwidth);
+ bw = min(bw, sta->deflink.cur_max_bandwidth);
/* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of
* IEEE80211-2016 specification makes higher bandwidth operation
@@ -501,12 +501,12 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
bool support_160;
/* if we received a notification already don't overwrite it */
- if (sta->sta.rx_nss)
+ if (sta->sta.deflink.rx_nss)
return;
- if (sta->sta.eht_cap.has_eht) {
+ if (sta->sta.deflink.eht_cap.has_eht) {
int i;
- const u8 *rx_nss_mcs = (void *)&sta->sta.eht_cap.eht_mcs_nss_supp;
+ const u8 *rx_nss_mcs = (void *)&sta->sta.deflink.eht_cap.eht_mcs_nss_supp;
/* get the max nss for EHT over all possible bandwidths and mcs */
for (i = 0; i < sizeof(struct ieee80211_eht_mcs_nss_supp); i++)
@@ -515,10 +515,10 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
IEEE80211_EHT_MCS_NSS_RX));
}
- if (sta->sta.he_cap.has_he) {
+ if (sta->sta.deflink.he_cap.has_he) {
int i;
u8 rx_mcs_80 = 0, rx_mcs_160 = 0;
- const struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap;
u16 mcs_160_map =
le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
@@ -549,23 +549,23 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
he_rx_nss = rx_mcs_80;
}
- if (sta->sta.ht_cap.ht_supported) {
- if (sta->sta.ht_cap.mcs.rx_mask[0])
+ if (sta->sta.deflink.ht_cap.ht_supported) {
+ if (sta->sta.deflink.ht_cap.mcs.rx_mask[0])
ht_rx_nss++;
- if (sta->sta.ht_cap.mcs.rx_mask[1])
+ if (sta->sta.deflink.ht_cap.mcs.rx_mask[1])
ht_rx_nss++;
- if (sta->sta.ht_cap.mcs.rx_mask[2])
+ if (sta->sta.deflink.ht_cap.mcs.rx_mask[2])
ht_rx_nss++;
- if (sta->sta.ht_cap.mcs.rx_mask[3])
+ if (sta->sta.deflink.ht_cap.mcs.rx_mask[3])
ht_rx_nss++;
/* FIXME: consider rx_highest? */
}
- if (sta->sta.vht_cap.vht_supported) {
+ if (sta->sta.deflink.vht_cap.vht_supported) {
int i;
u16 rx_mcs_map;
- rx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.rx_mcs_map);
+ rx_mcs_map = le16_to_cpu(sta->sta.deflink.vht_cap.vht_mcs.rx_mcs_map);
for (i = 7; i >= 0; i--) {
u8 mcs = (rx_mcs_map >> (2 * i)) & 3;
@@ -581,7 +581,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
rx_nss = max(vht_rx_nss, ht_rx_nss);
rx_nss = max(he_rx_nss, rx_nss);
rx_nss = max(eht_rx_nss, rx_nss);
- sta->sta.rx_nss = max_t(u8, 1, rx_nss);
+ sta->sta.deflink.rx_nss = max_t(u8, 1, rx_nss);
}
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -601,8 +601,8 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- if (sta->sta.rx_nss != nss) {
- sta->sta.rx_nss = nss;
+ if (sta->sta.deflink.rx_nss != nss) {
+ sta->sta.deflink.rx_nss = nss;
sta_opmode.rx_nss = nss;
changed |= IEEE80211_RC_NSS_CHANGED;
sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
@@ -611,27 +611,27 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
/* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ:
/* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ:
if (opmode & IEEE80211_OPMODE_NOTIF_BW_160_80P80)
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
else
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
break;
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ:
/* legacy only, no longer used by newer spec */
- sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
+ sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
break;
}
new_bw = ieee80211_sta_cur_vht_bw(sta);
- if (new_bw != sta->sta.bandwidth) {
- sta->sta.bandwidth = new_bw;
+ if (new_bw != sta->sta.deflink.bandwidth) {
+ sta->sta.deflink.bandwidth = new_bw;
sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(sta);
changed |= IEEE80211_RC_BW_CHANGED;
sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED;
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index fbeebe3bc31d..1e4a9f74ed43 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -118,6 +118,7 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
if (!ret) {
wpan_phy->current_page = page;
wpan_phy->current_channel = channel;
+ ieee802154_configure_durations(wpan_phy);
}
return ret;
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 702560acc8ce..1381e6a5e180 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -56,6 +56,8 @@ struct ieee802154_local {
struct sk_buff *tx_skb;
struct work_struct tx_work;
+ /* A negative Linux error code or a null/positive MLME error status */
+ int tx_result;
};
enum {
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 520cedc594e1..bd7bdb1219dd 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -113,6 +113,50 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
}
EXPORT_SYMBOL(ieee802154_alloc_hw);
+void ieee802154_configure_durations(struct wpan_phy *phy)
+{
+ u32 duration = 0;
+
+ switch (phy->current_page) {
+ case 0:
+ if (BIT(phy->current_channel) & 0x1)
+ /* 868 MHz BPSK 802.15.4-2003: 20 ksym/s */
+ duration = 50 * NSEC_PER_USEC;
+ else if (BIT(phy->current_channel) & 0x7FE)
+ /* 915 MHz BPSK 802.15.4-2003: 40 ksym/s */
+ duration = 25 * NSEC_PER_USEC;
+ else if (BIT(phy->current_channel) & 0x7FFF800)
+ /* 2400 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
+ duration = 16 * NSEC_PER_USEC;
+ break;
+ case 2:
+ if (BIT(phy->current_channel) & 0x1)
+ /* 868 MHz O-QPSK 802.15.4-2006: 25 ksym/s */
+ duration = 40 * NSEC_PER_USEC;
+ else if (BIT(phy->current_channel) & 0x7FE)
+ /* 915 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
+ duration = 16 * NSEC_PER_USEC;
+ break;
+ case 3:
+ if (BIT(phy->current_channel) & 0x3FFF)
+ /* 2.4 GHz CSS 802.15.4a-2007: 1/6 Msym/s */
+ duration = 6 * NSEC_PER_USEC;
+ break;
+ default:
+ break;
+ }
+
+ if (!duration) {
+ pr_debug("Unknown PHY symbol duration\n");
+ return;
+ }
+
+ phy->symbol_duration = duration;
+ phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+ phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+}
+EXPORT_SYMBOL(ieee802154_configure_durations);
+
void ieee802154_free_hw(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
@@ -131,10 +175,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
* Should be done when all drivers sets this value.
*/
- wpan_phy->lifs_period = IEEE802154_LIFS_PERIOD *
- wpan_phy->symbol_duration;
- wpan_phy->sifs_period = IEEE802154_SIFS_PERIOD *
- wpan_phy->symbol_duration;
+ wpan_phy->lifs_period =
+ (IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+ wpan_phy->sifs_period =
+ (IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
}
int ieee802154_register_hw(struct ieee802154_hw *hw)
@@ -157,6 +201,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
ieee802154_setup_wpan_phy_pib(local->phy);
+ ieee802154_configure_durations(local->phy);
+
if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
local->phy->supported.min_csma_backoffs = 4;
local->phy->supported.max_csma_backoffs = 4;
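
ieee802154_configure_durations() above derives the symbol duration in nanoseconds from the current page and channel, then scales it by the LIFS and SIFS periods. A worked stand-alone sketch of that arithmetic for the 2.4 GHz O-QPSK case (62.5 ksym/s, i.e. 16 us per symbol); the 40- and 12-symbol counts are assumed values for the standard's aLIFSPeriod/aSIFSPeriod, and the results are kept in nanoseconds rather than whatever unit the phy fields store:

#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Assumed symbol counts from IEEE 802.15.4 (aLIFSPeriod / aSIFSPeriod) */
#define LIFS_PERIOD_SYMS 40
#define SIFS_PERIOD_SYMS 12

int main(void)
{
	/* 2.4 GHz O-QPSK: 62.5 ksym/s -> 16 us per symbol */
	unsigned long long symbol_duration_ns = 16 * NSEC_PER_USEC;

	unsigned long long lifs_ns = LIFS_PERIOD_SYMS * symbol_duration_ns;
	unsigned long long sifs_ns = SIFS_PERIOD_SYMS * symbol_duration_ns;

	/* 640000 ns (640 us) and 192000 ns (192 us) respectively */
	printf("LIFS=%llu ns, SIFS=%llu ns\n", lifs_ns, sifs_ns);
	return 0;
}
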
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index f2078238718b..9f024d85563b 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -58,8 +58,11 @@ enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
bool ifs_handling)
{
+ struct ieee802154_local *local = hw_to_local(hw);
+
+ local->tx_result = IEEE802154_SUCCESS;
+
if (ifs_handling) {
- struct ieee802154_local *local = hw_to_local(hw);
u8 max_sifs_size;
/* If transceiver sets CRC on his own we need to use lifs
@@ -88,6 +91,23 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
+void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
+ int reason)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+
+ local->tx_result = reason;
+ ieee802154_wake_queue(hw);
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ieee802154_xmit_error);
+
+void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR);
+}
+EXPORT_SYMBOL(ieee802154_xmit_hw_error);
+
void ieee802154_stop_device(struct ieee802154_local *local)
{
flush_workqueue(local->workqueue);
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index e22b0cbb2f35..c2fc2a7b2528 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -216,7 +216,7 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
return -EOPNOTSUPP;
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &rc);
+ skb = skb_recv_datagram(sk, flags, &rc);
if (!skb)
return rc;
@@ -238,7 +238,7 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (rc < 0)
goto out_free;
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (addr) {
struct mctp_skb_cb *cb = mctp_cb(skb);
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 61205cf40074..24df29e135ed 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -352,7 +352,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
if (params->deliver) {
KUNIT_EXPECT_EQ(test, rc, 0);
- skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+ skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
KUNIT_EXPECT_EQ(test, skb->len, 1);
@@ -360,7 +360,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
} else {
KUNIT_EXPECT_NE(test, rc, 0);
- skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+ skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
KUNIT_EXPECT_PTR_EQ(test, skb2, NULL);
}
@@ -423,7 +423,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
rc = mctp_route_input(&rt->rt, skb);
}
- skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+ skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
if (params->rx_len) {
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
@@ -582,7 +582,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
rc = mctp_route_input(&rt->rt, skb);
/* (potentially) receive message */
- skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+ skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
if (params->deliver)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index d6fdc5782d33..35b5f806fdda 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1527,10 +1527,9 @@ static int mpls_ifdown(struct net_device *dev, int event)
rt->rt_nh_size;
struct mpls_route *orig = rt;
- rt = kmalloc(size, GFP_KERNEL);
+ rt = kmemdup(orig, size, GFP_KERNEL);
if (!rt)
return -ENOMEM;
- memcpy(rt, orig, size);
}
}
diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile
index e54daceac58b..cb7f53f6ab22 100644
--- a/net/mptcp/Makefile
+++ b/net/mptcp/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_MPTCP) += mptcp.o
mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \
- mib.o pm_netlink.o sockopt.o
+ mib.o pm_netlink.o sockopt.o pm_userspace.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 8b235468c88f..ae20b7d92e28 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -16,6 +16,11 @@
#define MPTCP_SYSCTL_PATH "net/mptcp"
static int mptcp_pernet_id;
+
+#ifdef CONFIG_SYSCTL
+static int mptcp_pm_type_max = __MPTCP_PM_TYPE_MAX;
+#endif
+
struct mptcp_pernet {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *ctl_table_hdr;
@@ -26,6 +31,7 @@ struct mptcp_pernet {
u8 mptcp_enabled;
u8 checksum_enabled;
u8 allow_join_initial_addr_port;
+ u8 pm_type;
};
static struct mptcp_pernet *mptcp_get_pernet(const struct net *net)
@@ -58,6 +64,11 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net)
return mptcp_get_pernet(net)->stale_loss_cnt;
}
+int mptcp_get_pm_type(const struct net *net)
+{
+ return mptcp_get_pernet(net)->pm_type;
+}
+
static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
@@ -65,6 +76,7 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
pernet->checksum_enabled = 0;
pernet->allow_join_initial_addr_port = 1;
pernet->stale_loss_cnt = 4;
+ pernet->pm_type = MPTCP_PM_TYPE_KERNEL;
}
#ifdef CONFIG_SYSCTL
@@ -108,6 +120,14 @@ static struct ctl_table mptcp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
},
+ {
+ .procname = "pm_type",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &mptcp_pm_type_max
+ },
{}
};
@@ -128,6 +148,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[2].data = &pernet->checksum_enabled;
table[3].data = &pernet->allow_join_initial_addr_port;
table[4].data = &pernet->stale_loss_cnt;
+ table[5].data = &pernet->pm_type;
hdr = register_net_sysctl(net, MPTCP_SYSCTL_PATH, table);
if (!hdr)
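
The ctrl.c hunk above adds a per-netns net.mptcp.pm_type sysctl, bounded by __MPTCP_PM_TYPE_MAX and defaulting to the in-kernel path manager. A minimal user-space sketch that reads it through procfs; the path follows the MPTCP_SYSCTL_PATH prefix shown above, and the 0 = kernel / 1 = userspace mapping is an assumption about the enum ordering:

#include <stdio.h>

int main(void)
{
	/* net/mptcp/pm_type is exposed under /proc/sys, per network namespace */
	FILE *f = fopen("/proc/sys/net/mptcp/pm_type", "r");
	int pm_type;

	if (!f) {
		perror("open pm_type");
		return 1;
	}
	if (fscanf(f, "%d", &pm_type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Assumed mapping: 0 = in-kernel PM (default), 1 = userspace PM */
	printf("pm_type=%d (%s)\n", pm_type,
	       pm_type == 0 ? "kernel" : "userspace");
	return 0;
}
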
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index e55d3dfbee0c..d93a8c9996fd 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -24,6 +24,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
+ SNMP_MIB_ITEM("InfiniteMapTx", MPTCP_MIB_INFINITEMAPTX),
SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
SNMP_MIB_ITEM("DSSNoMatchTCP", MPTCP_MIB_DSSTCPMISMATCH),
SNMP_MIB_ITEM("DataCsumErr", MPTCP_MIB_DATACSUMERR),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 00576179a619..529d07af9e14 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -17,6 +17,7 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
+ MPTCP_MIB_INFINITEMAPTX, /* Sent an infinite mapping */
MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
MPTCP_MIB_DSSTCPMISMATCH, /* DSS-mapping did not map with TCP's sequence numbers */
MPTCP_MIB_DATACSUMERR, /* The data checksum fail */
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index f44125dd6697..dbb6d876a203 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -66,20 +66,103 @@ out_nosk:
return err;
}
+struct mptcp_diag_ctx {
+ long s_slot;
+ long s_num;
+ unsigned int l_slot;
+ unsigned int l_num;
+};
+
+static void mptcp_diag_dump_listeners(struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ bool net_admin)
+{
+ struct inet_diag_dump_data *cb_data = cb->data;
+ struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx;
+ struct nlattr *bc = cb_data->inet_diag_nla_bc;
+ struct net *net = sock_net(skb->sk);
+ int i;
+
+ for (i = diag_ctx->l_slot; i < INET_LHTABLE_SIZE; i++) {
+ struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ int num = 0;
+
+ ilb = &tcp_hashinfo.listening_hash[i];
+
+ rcu_read_lock();
+ spin_lock(&ilb->lock);
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
+ const struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);
+ struct inet_sock *inet = inet_sk(sk);
+ int ret;
+
+ if (num < diag_ctx->l_num)
+ goto next_listen;
+
+ if (!ctx || strcmp(inet_csk(sk)->icsk_ulp_ops->name, "mptcp"))
+ goto next_listen;
+
+ sk = ctx->conn;
+ if (!sk || !net_eq(sock_net(sk), net))
+ goto next_listen;
+
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next_listen;
+
+ if (r->id.idiag_sport != inet->inet_sport &&
+ r->id.idiag_sport)
+ goto next_listen;
+
+ if (!refcount_inc_not_zero(&sk->sk_refcnt))
+ goto next_listen;
+
+ ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin);
+
+ sock_put(sk);
+
+ if (ret < 0) {
+ spin_unlock(&ilb->lock);
+ rcu_read_unlock();
+ diag_ctx->l_slot = i;
+ diag_ctx->l_num = num;
+ return;
+ }
+ diag_ctx->l_num = num + 1;
+ num = 0;
+next_listen:
+ ++num;
+ }
+ spin_unlock(&ilb->lock);
+ rcu_read_unlock();
+
+ cond_resched();
+ diag_ctx->l_num = 0;
+ }
+
+ diag_ctx->l_num = 0;
+ diag_ctx->l_slot = i;
+}
+
static void mptcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
+ struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx;
struct net *net = sock_net(skb->sk);
struct inet_diag_dump_data *cb_data;
struct mptcp_sock *msk;
struct nlattr *bc;
+ BUILD_BUG_ON(sizeof(cb->ctx) < sizeof(*diag_ctx));
+
cb_data = cb->data;
bc = cb_data->inet_diag_nla_bc;
- while ((msk = mptcp_token_iter_next(net, &cb->args[0], &cb->args[1])) !=
- NULL) {
+ while ((msk = mptcp_token_iter_next(net, &diag_ctx->s_slot,
+ &diag_ctx->s_num)) != NULL) {
struct inet_sock *inet = (struct inet_sock *)msk;
struct sock *sk = (struct sock *)msk;
int ret = 0;
@@ -101,11 +184,14 @@ next:
sock_put(sk);
if (ret < 0) {
/* will retry on the same position */
- cb->args[1]--;
+ diag_ctx->s_num--;
break;
}
cond_resched();
}
+
+ if ((r->idiag_states & TCPF_LISTEN) && r->id.idiag_dport == 0)
+ mptcp_diag_dump_listeners(skb, cb, r, net_admin);
}
static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
@@ -116,6 +202,19 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
r->idiag_rqueue = sk_rmem_alloc_get(sk);
r->idiag_wqueue = sk_wmem_alloc_get(sk);
+
+ if (inet_sk_state_load(sk) == TCP_LISTEN) {
+ struct sock *lsk = READ_ONCE(msk->first);
+
+ if (lsk) {
+ /* override with settings from tcp listener,
+ * so Send-Q will show accept queue.
+ */
+ r->idiag_rqueue = READ_ONCE(lsk->sk_ack_backlog);
+ r->idiag_wqueue = READ_ONCE(lsk->sk_max_ack_backlog);
+ }
+ }
+
if (!info)
return;
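
For listening MPTCP sockets, the hunk above overrides idiag_rqueue/idiag_wqueue with the first subflow's accept-queue counters so that diag dumps show the accept backlog rather than socket memory. A stand-alone sketch of that selection logic, with simplified placeholder types rather than the real diag structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified placeholders for the sockets involved */
struct listener {
	uint32_t ack_backlog;       /* connections queued, not yet accepted */
	uint32_t max_ack_backlog;   /* listen() backlog limit */
};

struct msk {
	bool listening;
	struct listener *first;     /* first TCP subflow, may be NULL */
	uint32_t rmem, wmem;        /* fallback: socket memory usage */
};

static void fill_queues(const struct msk *m, uint32_t *rq, uint32_t *wq)
{
	*rq = m->rmem;
	*wq = m->wmem;

	/* For listeners, report the accept queue instead of memory usage */
	if (m->listening && m->first) {
		*rq = m->first->ack_backlog;
		*wq = m->first->max_ack_backlog;
	}
}

int main(void)
{
	struct listener lsk = { .ack_backlog = 3, .max_ack_backlog = 128 };
	struct msk m = { .listening = true, .first = &lsk };
	uint32_t rq, wq;

	fill_queues(&m, &rq, &wq);
	printf("Recv-Q=%u Send-Q=%u\n", rq, wq);   /* 3 and 128 */
	return 0;
}
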
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 325383646f5c..e05d9458a025 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -825,7 +825,7 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
opts->suboptions = 0;
- if (unlikely(__mptcp_check_fallback(msk)))
+ if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
return false;
if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
@@ -931,7 +931,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
subflow->mp_join && (mp_opt->suboptions & OPTIONS_MPTCP_MPJ) &&
- READ_ONCE(msk->pm.server_side))
+ !subflow->request_join)
tcp_send_ack(ssk);
goto fully_established;
}
@@ -1133,7 +1133,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
if ((mp_opt.suboptions & OPTION_MPTCP_ADD_ADDR) &&
add_addr_hmac_valid(msk, &mp_opt)) {
if (!mp_opt.echo) {
- mptcp_pm_add_addr_received(msk, &mp_opt.addr);
+ mptcp_pm_add_addr_received(sk, &mp_opt.addr);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
} else {
mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
@@ -1340,8 +1340,12 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
put_unaligned_be32(mpext->subflow_seq, ptr);
ptr += 1;
if (opts->csum_reqd) {
+ /* data_len == 0 is reserved for the infinite mapping,
+ * the checksum will also be set to 0.
+ */
put_unaligned_be32(mpext->data_len << 16 |
- mptcp_make_csum(mpext), ptr);
+ (mpext->data_len ? mptcp_make_csum(mpext) : 0),
+ ptr);
} else {
put_unaligned_be32(mpext->data_len << 16 |
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
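
The options.c comment above notes that a DSS data_len of 0 is reserved for the infinite mapping, in which case the checksum slot is written as 0 as well. A tiny stand-alone sketch of how that 32-bit word is packed in the csum_reqd case (data_len in the high 16 bits, checksum or 0 in the low 16 bits); the helper name is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Pack data_len and checksum as in the csum_reqd branch above:
 * data_len == 0 marks the infinite mapping, so the csum slot is 0 too.
 */
static uint32_t pack_dss_len_csum(uint16_t data_len, uint16_t csum)
{
	return ((uint32_t)data_len << 16) | (data_len ? csum : 0);
}

int main(void)
{
	printf("normal:   0x%08x\n", pack_dss_len_csum(1400, 0xabcd));
	printf("infinite: 0x%08x\n", pack_dss_len_csum(0, 0xabcd));
	return 0;
}
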
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 01809eef29b4..cdc2d79071f8 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -87,6 +87,9 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
unsigned int subflows_max;
int ret = 0;
+ if (mptcp_pm_is_userspace(msk))
+ return mptcp_userspace_pm_active(msk);
+
subflows_max = mptcp_pm_get_subflows_max(msk);
pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
@@ -179,7 +182,8 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
bool update_subflows;
update_subflows = (ssk->sk_state == TCP_CLOSE) &&
- (subflow->request_join || subflow->mp_join);
+ (subflow->request_join || subflow->mp_join) &&
+ mptcp_pm_is_kernel(msk);
if (!READ_ONCE(pm->work_pending) && !update_subflows)
return;
@@ -196,19 +200,28 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
spin_unlock_bh(&pm->lock);
}
-void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+void mptcp_pm_add_addr_received(const struct sock *ssk,
const struct mptcp_addr_info *addr)
{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
READ_ONCE(pm->accept_addr));
- mptcp_event_addr_announced(msk, addr);
+ mptcp_event_addr_announced(ssk, addr);
spin_lock_bh(&pm->lock);
- if (!READ_ONCE(pm->accept_addr)) {
+ if (mptcp_pm_is_userspace(msk)) {
+ if (mptcp_userspace_pm_active(msk)) {
+ mptcp_pm_announce_addr(msk, addr, true);
+ mptcp_pm_add_addr_send_ack(msk);
+ } else {
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+ }
+ } else if (!READ_ONCE(pm->accept_addr)) {
mptcp_pm_announce_addr(msk, addr, true);
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
@@ -262,19 +275,52 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
spin_unlock_bh(&pm->lock);
}
-void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
+void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct sock *sk = subflow->conn;
+ struct mptcp_sock *msk;
pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
- subflow->backup = bkup;
+ msk = mptcp_sk(sk);
+ if (subflow->backup != bkup) {
+ subflow->backup = bkup;
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ msk->last_snd = NULL;
+ else
+ __set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
+ mptcp_data_unlock(sk);
+ }
- mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC);
+ mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ struct sock *s = (struct sock *)msk;
+
pr_debug("fail_seq=%llu", fail_seq);
+
+ if (mptcp_has_another_subflow(sk) || !READ_ONCE(msk->allow_infinite_fallback))
+ return;
+
+ if (!READ_ONCE(subflow->mp_fail_response_expect)) {
+ pr_debug("send MP_FAIL response and infinite map");
+
+ subflow->send_mp_fail = 1;
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
+ subflow->send_infinite_map = 1;
+ } else if (s && inet_sk_state_load(s) != TCP_CLOSE) {
+ pr_debug("MP_FAIL response received");
+
+ mptcp_data_lock(s);
+ if (inet_sk_state_load(s) != TCP_CLOSE)
+ sk_stop_timer(s, &s->sk_timer);
+ mptcp_data_unlock(s);
+ }
}
/* path manager helpers */
@@ -382,27 +428,48 @@ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
- msk->pm.add_addr_signaled = 0;
- msk->pm.add_addr_accepted = 0;
- msk->pm.local_addr_used = 0;
- msk->pm.subflows = 0;
- msk->pm.rm_list_tx.nr = 0;
- msk->pm.rm_list_rx.nr = 0;
- WRITE_ONCE(msk->pm.work_pending, false);
- WRITE_ONCE(msk->pm.addr_signal, 0);
- WRITE_ONCE(msk->pm.accept_addr, false);
- WRITE_ONCE(msk->pm.accept_subflow, false);
- WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
- msk->pm.status = 0;
- bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
+ struct mptcp_pm_data *pm = &msk->pm;
- mptcp_pm_nl_data_init(msk);
+ pm->add_addr_signaled = 0;
+ pm->add_addr_accepted = 0;
+ pm->local_addr_used = 0;
+ pm->subflows = 0;
+ pm->rm_list_tx.nr = 0;
+ pm->rm_list_rx.nr = 0;
+ WRITE_ONCE(pm->pm_type, pm_type);
+
+ if (pm_type == MPTCP_PM_TYPE_KERNEL) {
+ bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);
+
+ /* pm->work_pending must only be set to 'true' when
+ * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
+ */
+ WRITE_ONCE(pm->work_pending,
+ (!!mptcp_pm_get_local_addr_max(msk) &&
+ subflows_allowed) ||
+ !!mptcp_pm_get_add_addr_signal_max(msk));
+ WRITE_ONCE(pm->accept_addr,
+ !!mptcp_pm_get_add_addr_accept_max(msk) &&
+ subflows_allowed);
+ WRITE_ONCE(pm->accept_subflow, subflows_allowed);
+ } else {
+ WRITE_ONCE(pm->work_pending, 0);
+ WRITE_ONCE(pm->accept_addr, 0);
+ WRITE_ONCE(pm->accept_subflow, 0);
+ }
+
+ WRITE_ONCE(pm->addr_signal, 0);
+ WRITE_ONCE(pm->remote_deny_join_id0, false);
+ pm->status = 0;
+ bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
spin_lock_init(&msk->pm.lock);
INIT_LIST_HEAD(&msk->pm.anno_list);
+ INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
mptcp_pm_data_reset(msk);
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index b5e8de6f7507..e099f2a12504 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -22,14 +22,6 @@ static struct genl_family mptcp_genl_family;
static int pm_nl_pernet_id;
-struct mptcp_pm_addr_entry {
- struct list_head list;
- struct mptcp_addr_info addr;
- u8 flags;
- int ifindex;
- struct socket *lsk;
-};
-
struct mptcp_pm_add_entry {
struct list_head list;
struct mptcp_addr_info addr;
@@ -55,8 +47,19 @@ struct pm_nl_pernet {
#define MPTCP_PM_ADDR_MAX 8
#define ADD_ADDR_RETRANS_MAX 3
-static bool addresses_equal(const struct mptcp_addr_info *a,
- const struct mptcp_addr_info *b, bool use_port)
+static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
+{
+ return net_generic(net, pm_nl_pernet_id);
+}
+
+static struct pm_nl_pernet *
+pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
+{
+ return pm_nl_get_pernet(sock_net((struct sock *)msk));
+}
+
+bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ const struct mptcp_addr_info *b, bool use_port)
{
bool addr_equals = false;
@@ -120,7 +123,7 @@ static bool lookup_subflow_by_saddr(const struct list_head *list,
skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
local_address(skc, &cur);
- if (addresses_equal(&cur, saddr, saddr->port))
+ if (mptcp_addresses_equal(&cur, saddr, saddr->port))
return true;
}
@@ -138,7 +141,7 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
remote_address(skc, &cur);
- if (addresses_equal(&cur, daddr, daddr->port))
+ if (mptcp_addresses_equal(&cur, daddr, daddr->port))
return true;
}
@@ -206,43 +209,39 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
{
- const struct pm_nl_pernet *pernet;
+ const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- pernet = net_generic(sock_net((const struct sock *)msk), pm_nl_pernet_id);
return READ_ONCE(pernet->add_addr_signal_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);
unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
{
- struct pm_nl_pernet *pernet;
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
return READ_ONCE(pernet->add_addr_accept_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
{
- struct pm_nl_pernet *pernet;
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
return READ_ONCE(pernet->subflows_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
{
- struct pm_nl_pernet *pernet;
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
return READ_ONCE(pernet->local_addr_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);
bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
{
- struct pm_nl_pernet *pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
(find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
@@ -262,7 +261,7 @@ mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
lockdep_assert_held(&msk->pm.lock);
list_for_each_entry(entry, &msk->pm.anno_list, list) {
- if (addresses_equal(&entry->addr, addr, true))
+ if (mptcp_addresses_equal(&entry->addr, addr, true))
return entry;
}
@@ -279,7 +278,7 @@ bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
spin_lock_bh(&msk->pm.lock);
list_for_each_entry(entry, &msk->pm.anno_list, list) {
- if (addresses_equal(&entry->addr, &saddr, true)) {
+ if (mptcp_addresses_equal(&entry->addr, &saddr, true)) {
ret = true;
goto out;
}
@@ -353,8 +352,8 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
return entry;
}
-static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
- const struct mptcp_pm_addr_entry *entry)
+bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ const struct mptcp_pm_addr_entry *entry)
{
struct mptcp_pm_add_entry *add_entry = NULL;
struct sock *sk = (struct sock *)msk;
@@ -362,8 +361,16 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
lockdep_assert_held(&msk->pm.lock);
- if (mptcp_lookup_anno_list_by_saddr(msk, &entry->addr))
- return false;
+ add_entry = mptcp_lookup_anno_list_by_saddr(msk, &entry->addr);
+
+ if (add_entry) {
+ if (mptcp_pm_is_kernel(msk))
+ return false;
+
+ sk_reset_timer(sk, &add_entry->add_timer,
+ jiffies + mptcp_get_add_addr_timeout(net));
+ return true;
+ }
add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
if (!add_entry)
@@ -406,7 +413,7 @@ static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned
int i;
for (i = 0; i < nr; i++) {
- if (addresses_equal(&addrs[i], addr, addr->port))
+ if (mptcp_addresses_equal(&addrs[i], addr, addr->port))
return true;
}
@@ -442,7 +449,7 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
mptcp_for_each_subflow(msk, subflow) {
ssk = mptcp_subflow_tcp_sock(subflow);
remote_address((struct sock_common *)ssk, &addrs[i]);
- if (deny_id0 && addresses_equal(&addrs[i], &remote, false))
+ if (deny_id0 && mptcp_addresses_equal(&addrs[i], &remote, false))
continue;
if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
@@ -475,7 +482,7 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if ((!lookup_by_id && addresses_equal(&entry->addr, info, true)) ||
+ if ((!lookup_by_id && mptcp_addresses_equal(&entry->addr, info, true)) ||
(lookup_by_id && entry->addr.id == info->id))
return entry;
}
@@ -490,7 +497,7 @@ lookup_id_by_addr(const struct pm_nl_pernet *pernet, const struct mptcp_addr_inf
rcu_read_lock();
list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if (addresses_equal(&entry->addr, addr, entry->addr.port)) {
+ if (mptcp_addresses_equal(&entry->addr, addr, entry->addr.port)) {
ret = entry->addr.id;
break;
}
@@ -508,7 +515,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
struct pm_nl_pernet *pernet;
unsigned int subflows_max;
- pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
+ pernet = pm_nl_get_pernet(sock_net(sk));
add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
local_addr_max = mptcp_pm_get_local_addr_max(msk);
@@ -604,7 +611,7 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
unsigned int subflows_max;
int i = 0;
- pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
+ pernet = pm_nl_get_pernet_from_msk(msk);
subflows_max = mptcp_pm_get_subflows_max(msk);
rcu_read_lock();
@@ -724,9 +731,11 @@ static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
struct mptcp_addr_info local;
local_address((struct sock_common *)ssk, &local);
- if (!addresses_equal(&local, addr, addr->port))
+ if (!mptcp_addresses_equal(&local, addr, addr->port))
continue;
+ if (subflow->backup != bkup)
+ msk->last_snd = NULL;
subflow->backup = bkup;
subflow->send_mp_prio = 1;
subflow->request_bkup = bkup;
@@ -796,6 +805,9 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
if (!removed)
continue;
+ if (!mptcp_pm_is_kernel(msk))
+ continue;
+
if (rm_type == MPTCP_MIB_RMADDR) {
msk->pm.add_addr_accepted--;
WRITE_ONCE(msk->pm.accept_addr, true);
@@ -889,9 +901,9 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
* singled addresses
*/
list_for_each_entry(cur, &pernet->local_addr_list, list) {
- if (addresses_equal(&cur->addr, &entry->addr,
- address_use_port(entry) &&
- address_use_port(cur))) {
+ if (mptcp_addresses_equal(&cur->addr, &entry->addr,
+ address_use_port(entry) &&
+ address_use_port(cur))) {
/* allow replacing the existing endpoint only if such
* endpoint is an implicit one and the user-space
* did not provide an endpoint id
@@ -1018,14 +1030,17 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
*/
local_address((struct sock_common *)msk, &msk_local);
local_address((struct sock_common *)skc, &skc_local);
- if (addresses_equal(&msk_local, &skc_local, false))
+ if (mptcp_addresses_equal(&msk_local, &skc_local, false))
return 0;
- pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+ if (mptcp_pm_is_userspace(msk))
+ return mptcp_userspace_pm_get_local_id(msk, &skc_local);
+
+ pernet = pm_nl_get_pernet_from_msk(msk);
rcu_read_lock();
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
- if (addresses_equal(&entry->addr, &skc_local, entry->addr.port)) {
+ if (mptcp_addresses_equal(&entry->addr, &skc_local, entry->addr.port)) {
ret = entry->addr.id;
break;
}
@@ -1052,18 +1067,6 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
return ret;
}
-void mptcp_pm_nl_data_init(struct mptcp_sock *msk)
-{
- struct mptcp_pm_data *pm = &msk->pm;
- bool subflows;
-
- subflows = !!mptcp_pm_get_subflows_max(msk);
- WRITE_ONCE(pm->work_pending, (!!mptcp_pm_get_local_addr_max(msk) && subflows) ||
- !!mptcp_pm_get_add_addr_signal_max(msk));
- WRITE_ONCE(pm->accept_addr, !!mptcp_pm_get_add_addr_accept_max(msk) && subflows);
- WRITE_ONCE(pm->accept_subflow, subflows);
-}
-
#define MPTCP_PM_CMD_GRP_OFFSET 0
#define MPTCP_PM_EV_GRP_OFFSET 1
@@ -1091,6 +1094,10 @@ static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = {
NLA_POLICY_NESTED(mptcp_pm_addr_policy),
[MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, },
[MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_LOC_ID] = { .type = NLA_U8, },
+ [MPTCP_PM_ATTR_ADDR_REMOTE] =
+ NLA_POLICY_NESTED(mptcp_pm_addr_policy),
};
void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
@@ -1139,11 +1146,12 @@ static int mptcp_pm_family_to_addr(int family)
return MPTCP_PM_ADDR_ATTR_ADDR4;
}
-static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
- bool require_family,
- struct mptcp_pm_addr_entry *entry)
+static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[],
+ const struct nlattr *attr,
+ struct genl_info *info,
+ struct mptcp_addr_info *addr,
+ bool require_family)
{
- struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
int err, addr_addr;
if (!attr) {
@@ -1157,27 +1165,29 @@ static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
if (err)
return err;
- memset(entry, 0, sizeof(*entry));
+ if (tb[MPTCP_PM_ADDR_ATTR_ID])
+ addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
+
if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) {
if (!require_family)
- goto skip_family;
+ return err;
NL_SET_ERR_MSG_ATTR(info->extack, attr,
"missing family");
return -EINVAL;
}
- entry->addr.family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
- if (entry->addr.family != AF_INET
+ addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
+ if (addr->family != AF_INET
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- && entry->addr.family != AF_INET6
+ && addr->family != AF_INET6
#endif
) {
NL_SET_ERR_MSG_ATTR(info->extack, attr,
"unknown address family");
return -EINVAL;
}
- addr_addr = mptcp_pm_family_to_addr(entry->addr.family);
+ addr_addr = mptcp_pm_family_to_addr(addr->family);
if (!tb[addr_addr]) {
NL_SET_ERR_MSG_ATTR(info->extack, attr,
"missing address data");
@@ -1185,22 +1195,47 @@ static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- if (entry->addr.family == AF_INET6)
- entry->addr.addr6 = nla_get_in6_addr(tb[addr_addr]);
+ if (addr->family == AF_INET6)
+ addr->addr6 = nla_get_in6_addr(tb[addr_addr]);
else
#endif
- entry->addr.addr.s_addr = nla_get_in_addr(tb[addr_addr]);
+ addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]);
+
+ if (tb[MPTCP_PM_ADDR_ATTR_PORT])
+ addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
+
+ return err;
+}
+
+int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
+ struct mptcp_addr_info *addr)
+{
+ struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
+
+ memset(addr, 0, sizeof(*addr));
+
+ return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true);
+}
+
+int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
+ bool require_family,
+ struct mptcp_pm_addr_entry *entry)
+{
+ struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
+ int err;
+
+ memset(entry, 0, sizeof(*entry));
+
+ err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family);
+ if (err)
+ return err;
-skip_family:
if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
entry->ifindex = val;
}
- if (tb[MPTCP_PM_ADDR_ATTR_ID])
- entry->addr.id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
-
if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
@@ -1212,7 +1247,7 @@ skip_family:
static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
{
- return net_generic(genl_info_net(info), pm_nl_pernet_id);
+ return pm_nl_get_pernet(genl_info_net(info));
}
static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
@@ -1223,7 +1258,8 @@ static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
- if (!READ_ONCE(msk->fully_established))
+ if (!READ_ONCE(msk->fully_established) ||
+ mptcp_pm_is_userspace(msk))
goto next;
lock_sock(sk);
@@ -1247,7 +1283,7 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
struct mptcp_pm_addr_entry addr, *entry;
int ret;
- ret = mptcp_pm_parse_addr(attr, info, true, &addr);
+ ret = mptcp_pm_parse_entry(attr, info, true, &addr);
if (ret < 0)
return ret;
@@ -1296,17 +1332,25 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
+int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
u8 *flags, int *ifindex)
{
struct mptcp_pm_addr_entry *entry;
+ struct sock *sk = (struct sock *)msk;
+ struct net *net = sock_net(sk);
*flags = 0;
*ifindex = 0;
if (id) {
+ if (mptcp_pm_is_userspace(msk))
+ return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk,
+ id,
+ flags,
+ ifindex);
+
rcu_read_lock();
- entry = __lookup_addr_by_id(net_generic(net, pm_nl_pernet_id), id);
+ entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
if (entry) {
*flags = entry->flags;
*ifindex = entry->ifindex;
@@ -1366,6 +1410,9 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
struct sock *sk = (struct sock *)msk;
bool remove_subflow;
+ if (mptcp_pm_is_userspace(msk))
+ goto next;
+
if (list_empty(&msk->conn_list)) {
mptcp_pm_remove_anno_addr(msk, addr, false);
goto next;
@@ -1400,11 +1447,11 @@ static int mptcp_nl_remove_id_zero_address(struct net *net,
struct sock *sk = (struct sock *)msk;
struct mptcp_addr_info msk_local;
- if (list_empty(&msk->conn_list))
+ if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
goto next;
local_address((struct sock_common *)msk, &msk_local);
- if (!addresses_equal(&msk_local, addr, addr->port))
+ if (!mptcp_addresses_equal(&msk_local, addr, addr->port))
goto next;
lock_sock(sk);
@@ -1430,7 +1477,7 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
unsigned int addr_max;
int ret;
- ret = mptcp_pm_parse_addr(attr, info, false, &addr);
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
if (ret < 0)
return ret;
@@ -1470,8 +1517,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
return ret;
}
-static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list)
+void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
@@ -1507,9 +1554,11 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
- lock_sock(sk);
- mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
- release_sock(sk);
+ if (!mptcp_pm_is_userspace(msk)) {
+ lock_sock(sk);
+ mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
+ release_sock(sk);
+ }
sock_put(sk);
cond_resched();
@@ -1602,7 +1651,7 @@ static int mptcp_nl_cmd_get_addr(struct sk_buff *skb, struct genl_info *info)
void *reply;
int ret;
- ret = mptcp_pm_parse_addr(attr, info, false, &addr);
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
if (ret < 0)
return ret;
@@ -1653,7 +1702,7 @@ static int mptcp_nl_cmd_dump_addrs(struct sk_buff *msg,
void *hdr;
int i;
- pernet = net_generic(net, pm_nl_pernet_id);
+ pernet = pm_nl_get_pernet(net);
spin_lock_bh(&pernet->lock);
for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
@@ -1782,7 +1831,7 @@ static int mptcp_nl_set_flags(struct net *net,
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
- if (list_empty(&msk->conn_list))
+ if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
goto next;
lock_sock(sk);
@@ -1813,7 +1862,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
u8 bkup = 0, lookup_by_id = 0;
int ret;
- ret = mptcp_pm_parse_addr(attr, info, false, &addr);
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
if (ret < 0)
return ret;
@@ -1852,6 +1901,13 @@ static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gf
nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp);
}
+bool mptcp_userspace_pm_active(const struct mptcp_sock *msk)
+{
+ return genl_has_listeners(&mptcp_genl_family,
+ sock_net((const struct sock *)msk),
+ MPTCP_PM_EV_GRP_OFFSET);
+}
+
static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
{
const struct inet_sock *issk = inet_sk(ssk);
@@ -1972,6 +2028,9 @@ static int mptcp_event_created(struct sk_buff *skb,
if (err)
return err;
+ if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
+ return -EMSGSIZE;
+
return mptcp_event_add_subflow(skb, ssk);
}
@@ -2006,10 +2065,12 @@ nla_put_failure:
kfree_skb(skb);
}
-void mptcp_event_addr_announced(const struct mptcp_sock *msk,
+void mptcp_event_addr_announced(const struct sock *ssk,
const struct mptcp_addr_info *info)
{
- struct net *net = sock_net((const struct sock *)msk);
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ struct net *net = sock_net(ssk);
struct nlmsghdr *nlh;
struct sk_buff *skb;
@@ -2031,7 +2092,10 @@ void mptcp_event_addr_announced(const struct mptcp_sock *msk,
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
goto nla_put_failure;
- if (nla_put_be16(skb, MPTCP_ATTR_DPORT, info->port))
+ if (nla_put_be16(skb, MPTCP_ATTR_DPORT,
+ info->port == 0 ?
+ inet_sk(ssk)->inet_dport :
+ info->port))
goto nla_put_failure;
switch (info->family) {
@@ -2148,6 +2212,26 @@ static const struct genl_small_ops mptcp_pm_ops[] = {
.doit = mptcp_nl_cmd_set_flags,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = MPTCP_PM_CMD_ANNOUNCE,
+ .doit = mptcp_nl_cmd_announce,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_REMOVE,
+ .doit = mptcp_nl_cmd_remove,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_SUBFLOW_CREATE,
+ .doit = mptcp_nl_cmd_sf_create,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_SUBFLOW_DESTROY,
+ .doit = mptcp_nl_cmd_sf_destroy,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family mptcp_genl_family __ro_after_init = {
@@ -2165,7 +2249,7 @@ static struct genl_family mptcp_genl_family __ro_after_init = {
static int __net_init pm_nl_init_net(struct net *net)
{
- struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
@@ -2187,7 +2271,7 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
struct net *net;
list_for_each_entry(net, net_list, exit_list) {
- struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
/* net is removed from namespace list, can't race with
* other modifiers, also netns core already waited for a
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
new file mode 100644
index 000000000000..f56378e4f597
--- /dev/null
+++ b/net/mptcp/pm_userspace.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Multipath TCP
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ */
+
+#include "protocol.h"
+
+void mptcp_free_local_addr_list(struct mptcp_sock *msk)
+{
+ struct mptcp_pm_addr_entry *entry, *tmp;
+ struct sock *sk = (struct sock *)msk;
+ LIST_HEAD(free_list);
+
+ if (!mptcp_pm_is_userspace(msk))
+ return;
+
+ spin_lock_bh(&msk->pm.lock);
+ list_splice_init(&msk->pm.userspace_pm_local_addr_list, &free_list);
+ spin_unlock_bh(&msk->pm.lock);
+
+ list_for_each_entry_safe(entry, tmp, &free_list, list) {
+ sock_kfree_s(sk, entry, sizeof(*entry));
+ }
+}
+
+int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *entry)
+{
+ DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ struct mptcp_pm_addr_entry *match = NULL;
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_pm_addr_entry *e;
+ bool addr_match = false;
+ bool id_match = false;
+ int ret = -EINVAL;
+
+ bitmap_zero(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
+ addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
+ if (addr_match && entry->addr.id == 0)
+ entry->addr.id = e->addr.id;
+ id_match = (e->addr.id == entry->addr.id);
+ if (addr_match && id_match) {
+ match = e;
+ break;
+ } else if (addr_match || id_match) {
+ break;
+ }
+ __set_bit(e->addr.id, id_bitmap);
+ }
+
+ if (!match && !addr_match && !id_match) {
+ /* Memory for the entry is allocated from the
+ * sock option buffer.
+ */
+ e = sock_kmalloc(sk, sizeof(*e), GFP_ATOMIC);
+ if (!e) {
+ spin_unlock_bh(&msk->pm.lock);
+ return -ENOMEM;
+ }
+
+ *e = *entry;
+ if (!e->addr.id)
+ e->addr.id = find_next_zero_bit(id_bitmap,
+ MPTCP_PM_MAX_ADDR_ID + 1,
+ 1);
+ list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list);
+ ret = e->addr.id;
+ } else if (match) {
+ ret = entry->addr.id;
+ }
+
+ spin_unlock_bh(&msk->pm.lock);
+ return ret;
+}
+
+int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+ unsigned int id,
+ u8 *flags, int *ifindex)
+{
+ struct mptcp_pm_addr_entry *entry, *match = NULL;
+
+ *flags = 0;
+ *ifindex = 0;
+
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (id == entry->addr.id) {
+ match = entry;
+ break;
+ }
+ }
+ spin_unlock_bh(&msk->pm.lock);
+ if (match) {
+ *flags = match->flags;
+ *ifindex = match->ifindex;
+ }
+
+ return 0;
+}
+
+int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
+ struct mptcp_addr_info *skc)
+{
+ struct mptcp_pm_addr_entry new_entry;
+ __be16 msk_sport = ((struct inet_sock *)
+ inet_sk((struct sock *)msk))->inet_sport;
+
+ memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry));
+ new_entry.addr = *skc;
+ new_entry.addr.id = 0;
+ new_entry.flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
+
+ if (new_entry.addr.port == msk_sport)
+ new_entry.addr.port = 0;
+
+ return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
+}
+
+int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *addr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct mptcp_pm_addr_entry addr_val;
+ struct mptcp_sock *msk;
+ int err = -EINVAL;
+ u32 token_val;
+
+ if (!addr || !token) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
+ return err;
+ }
+
+ token_val = nla_get_u32(token);
+
+ msk = mptcp_token_get_sock(sock_net(skb->sk), token_val);
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return err;
+ }
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto announce_err;
+ }
+
+ err = mptcp_pm_parse_entry(addr, info, true, &addr_val);
+ if (err < 0) {
+ GENL_SET_ERR_MSG(info, "error parsing local address");
+ goto announce_err;
+ }
+
+ if (addr_val.addr.id == 0 || !(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
+ GENL_SET_ERR_MSG(info, "invalid addr id or flags");
+ goto announce_err;
+ }
+
+ err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val);
+ if (err < 0) {
+ GENL_SET_ERR_MSG(info, "did not match address and id");
+ goto announce_err;
+ }
+
+ lock_sock((struct sock *)msk);
+ spin_lock_bh(&msk->pm.lock);
+
+ if (mptcp_pm_alloc_anno_list(msk, &addr_val)) {
+ mptcp_pm_announce_addr(msk, &addr_val.addr, false);
+ mptcp_pm_nl_addr_send_ack(msk);
+ }
+
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock((struct sock *)msk);
+
+ err = 0;
+ announce_err:
+ sock_put((struct sock *)msk);
+ return err;
+}
+
+int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID];
+ struct mptcp_pm_addr_entry *match = NULL;
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_sock *msk;
+ LIST_HEAD(free_list);
+ int err = -EINVAL;
+ u32 token_val;
+ u8 id_val;
+
+ if (!id || !token) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
+ return err;
+ }
+
+ id_val = nla_get_u8(id);
+ token_val = nla_get_u32(token);
+
+ msk = mptcp_token_get_sock(sock_net(skb->sk), token_val);
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return err;
+ }
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto remove_err;
+ }
+
+ lock_sock((struct sock *)msk);
+
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (entry->addr.id == id_val) {
+ match = entry;
+ break;
+ }
+ }
+
+ if (!match) {
+ GENL_SET_ERR_MSG(info, "address with specified id not found");
+ release_sock((struct sock *)msk);
+ goto remove_err;
+ }
+
+ list_move(&match->list, &free_list);
+
+ mptcp_pm_remove_addrs_and_subflows(msk, &free_list);
+
+ release_sock((struct sock *)msk);
+
+ list_for_each_entry_safe(match, entry, &free_list, list) {
+ sock_kfree_s((struct sock *)msk, match, sizeof(*match));
+ }
+
+ err = 0;
+ remove_err:
+ sock_put((struct sock *)msk);
+ return err;
+}
+
+int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct mptcp_addr_info addr_r;
+ struct mptcp_addr_info addr_l;
+ struct mptcp_sock *msk;
+ int err = -EINVAL;
+ struct sock *sk;
+ u32 token_val;
+
+ if (!laddr || !raddr || !token) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
+ return err;
+ }
+
+ token_val = nla_get_u32(token);
+
+ msk = mptcp_token_get_sock(genl_info_net(info), token_val);
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return err;
+ }
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto create_err;
+ }
+
+ err = mptcp_pm_parse_addr(laddr, info, &addr_l);
+ if (err < 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
+ goto create_err;
+ }
+
+ if (addr_l.id == 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
+ goto create_err;
+ }
+
+ err = mptcp_pm_parse_addr(raddr, info, &addr_r);
+ if (err < 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
+ goto create_err;
+ }
+
+ sk = &msk->sk.icsk_inet.sk;
+ lock_sock(sk);
+
+ err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+
+ release_sock(sk);
+
+ create_err:
+ sock_put((struct sock *)msk);
+ return err;
+}
+
+static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *local,
+ const struct mptcp_addr_info *remote)
+{
+ struct sock *sk = &msk->sk.icsk_inet.sk;
+ struct mptcp_subflow_context *subflow;
+ struct sock *found = NULL;
+
+ if (local->family != remote->family)
+ return NULL;
+
+ lock_sock(sk);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ const struct inet_sock *issk;
+ struct sock *ssk;
+
+ ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (local->family != ssk->sk_family)
+ continue;
+
+ issk = inet_sk(ssk);
+
+ switch (ssk->sk_family) {
+ case AF_INET:
+ if (issk->inet_saddr != local->addr.s_addr ||
+ issk->inet_daddr != remote->addr.s_addr)
+ continue;
+ break;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ case AF_INET6: {
+ const struct ipv6_pinfo *pinfo = inet6_sk(ssk);
+
+ if (!ipv6_addr_equal(&local->addr6, &pinfo->saddr) ||
+ !ipv6_addr_equal(&remote->addr6, &ssk->sk_v6_daddr))
+ continue;
+ break;
+ }
+#endif
+ default:
+ continue;
+ }
+
+ if (issk->inet_sport == local->port &&
+ issk->inet_dport == remote->port) {
+ found = ssk;
+ goto found;
+ }
+ }
+
+found:
+ release_sock(sk);
+
+ return found;
+}
+
+int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct mptcp_addr_info addr_l;
+ struct mptcp_addr_info addr_r;
+ struct mptcp_sock *msk;
+ struct sock *sk, *ssk;
+ int err = -EINVAL;
+ u32 token_val;
+
+ if (!laddr || !raddr || !token) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
+ return err;
+ }
+
+ token_val = nla_get_u32(token);
+
+ msk = mptcp_token_get_sock(genl_info_net(info), token_val);
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return err;
+ }
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto destroy_err;
+ }
+
+ err = mptcp_pm_parse_addr(laddr, info, &addr_l);
+ if (err < 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
+ goto destroy_err;
+ }
+
+ err = mptcp_pm_parse_addr(raddr, info, &addr_r);
+ if (err < 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
+ goto destroy_err;
+ }
+
+ if (addr_l.family != addr_r.family) {
+ GENL_SET_ERR_MSG(info, "address families do not match");
+ goto destroy_err;
+ }
+
+ if (!addr_l.port || !addr_r.port) {
+ GENL_SET_ERR_MSG(info, "missing local or remote port");
+ goto destroy_err;
+ }
+
+ sk = &msk->sk.icsk_inet.sk;
+ ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r);
+ if (ssk) {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+
+ mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
+ mptcp_close_ssk(sk, ssk, subflow);
+ err = 0;
+ } else {
+ err = -ESRCH;
+ }
+
+ destroy_err:
+ sock_put((struct sock *)msk);
+ return err;
+}
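
The four generic netlink commands implemented above are only honoured when the connection is handled by the userspace path manager (the pm_type knob introduced by this series). As a client-side sketch only, not part of the patch: it assumes libmnl, that the genetlink family id for the MPTCP PM family has already been resolved into "family", and that the connection token was learned from the MPTCP netlink events; command and attribute constants come from the uapi header this series extends.

/* Client-side sketch (not part of the patch): announce one IPv4 address
 * on an existing MPTCP connection via MPTCP_PM_CMD_ANNOUNCE. Assumes the
 * genetlink family id was already resolved and the token was taken from
 * the MPTCP netlink events.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/mptcp.h>

static int mptcp_pm_announce_v4(struct mnl_socket *nl, uint16_t family,
				uint32_t token, uint8_t id, const char *ip)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct genlmsghdr *genl;
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct in_addr addr;

	if (inet_pton(AF_INET, ip, &addr) != 1)
		return -1;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = family;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
	genl->cmd = MPTCP_PM_CMD_ANNOUNCE;
	genl->version = MPTCP_PM_VER;

	/* the token selects the msk owned by this userspace PM */
	mnl_attr_put_u32(nlh, MPTCP_PM_ATTR_TOKEN, token);

	/* the handler requires a non-zero id and the SIGNAL flag */
	nest = mnl_attr_nest_start(nlh, MPTCP_PM_ATTR_ADDR);
	mnl_attr_put_u16(nlh, MPTCP_PM_ADDR_ATTR_FAMILY, AF_INET);
	mnl_attr_put_u8(nlh, MPTCP_PM_ADDR_ATTR_ID, id);
	mnl_attr_put_u32(nlh, MPTCP_PM_ADDR_ATTR_FLAGS, MPTCP_PM_ADDR_FLAG_SIGNAL);
	mnl_attr_put(nlh, MPTCP_PM_ADDR_ATTR_ADDR4, sizeof(addr), &addr);
	mnl_attr_nest_end(nlh, nest);

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		return -1;

	/* a real PM would also read the ACK/extack back here */
	return 0;
}

On failure, the extack strings set above with GENL_SET_ERR_MSG()/NL_SET_ERR_MSG_ATTR() are what such a client would receive.
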
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 0cbea3b6d0a4..52ed2c0ac901 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1229,6 +1229,22 @@ static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
}
+static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+ struct sock *ssk,
+ struct mptcp_ext *mpext)
+{
+ if (!mpext)
+ return;
+
+ mpext->infinite_map = 1;
+ mpext->data_len = 0;
+
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
+ mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
+ pr_fallback(msk);
+ __mptcp_do_fallback(msk);
+}
+
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
struct mptcp_data_frag *dfrag,
struct mptcp_sendmsg_info *info)
@@ -1360,6 +1376,8 @@ alloc_skb:
out:
if (READ_ONCE(msk->csum_enabled))
mptcp_update_data_checksum(skb, copy);
+ if (mptcp_subflow_ctx(ssk)->send_infinite_map)
+ mptcp_update_infinite_map(msk, ssk, mpext);
trace_mptcp_sendmsg_frag(mpext);
mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
return copy;
@@ -1587,8 +1605,10 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
out:
/* ensure the rtx timer is running */
+ mptcp_data_lock(sk);
if (!mptcp_timer_pending(sk))
mptcp_reset_timer(sk);
+ mptcp_data_unlock(sk);
if (copied)
__mptcp_check_send_data_fin(sk);
}
@@ -2012,7 +2032,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk)
}
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct scm_timestamping_internal tss;
@@ -2030,7 +2050,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
goto out_err;
}
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
len = min_t(size_t, len, INT_MAX);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
@@ -2149,10 +2169,38 @@ static void mptcp_retransmit_timer(struct timer_list *t)
sock_put(sk);
}
+static struct mptcp_subflow_context *
+mp_fail_response_expect_subflow(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow, *ret = NULL;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ if (READ_ONCE(subflow->mp_fail_response_expect)) {
+ ret = subflow;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void mptcp_check_mp_fail_response(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+
+ bh_lock_sock(sk);
+ subflow = mp_fail_response_expect_subflow(msk);
+ if (subflow)
+ __set_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags);
+ bh_unlock_sock(sk);
+}
+
static void mptcp_timeout_timer(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
+ mptcp_check_mp_fail_response(mptcp_sk(sk));
mptcp_schedule_work(sk);
sock_put(sk);
}
@@ -2465,6 +2513,7 @@ static void __mptcp_retrans(struct sock *sk)
dfrag->already_sent = max(dfrag->already_sent, info.sent);
tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
info.size_goal);
+ WRITE_ONCE(msk->allow_infinite_fallback, false);
}
release_sock(ssk);
@@ -2472,8 +2521,27 @@ static void __mptcp_retrans(struct sock *sk)
reset_timer:
mptcp_check_and_set_pending(sk);
+ mptcp_data_lock(sk);
if (!mptcp_timer_pending(sk))
mptcp_reset_timer(sk);
+ mptcp_data_unlock(sk);
+}
+
+static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *ssk;
+ bool slow;
+
+ subflow = mp_fail_response_expect_subflow(msk);
+ if (subflow) {
+ pr_debug("MP_FAIL doesn't respond, reset the subflow");
+
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ slow = lock_sock_fast(ssk);
+ mptcp_subflow_reset(ssk);
+ unlock_sock_fast(ssk, slow);
+ }
}
static void mptcp_worker(struct work_struct *work)
@@ -2516,6 +2584,9 @@ static void mptcp_worker(struct work_struct *work)
if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
__mptcp_retrans(sk);
+ if (test_and_clear_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags))
+ mptcp_mp_fail_no_response(msk);
+
unlock:
release_sock(sk);
sock_put(sk);
@@ -2539,6 +2610,7 @@ static int __mptcp_init_sock(struct sock *sk)
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
+ WRITE_ONCE(msk->allow_infinite_fallback, true);
msk->recovery = false;
mptcp_pm_data_init(msk);
@@ -2631,8 +2703,10 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
} else {
pr_debug("Sending DATA_FIN on subflow %p", ssk);
tcp_send_ack(ssk);
+ mptcp_data_lock(sk);
if (!mptcp_timer_pending(sk))
mptcp_reset_timer(sk);
+ mptcp_data_unlock(sk);
}
break;
}
@@ -2733,8 +2807,10 @@ static void __mptcp_destroy_sock(struct sock *sk)
/* join list will be eventually flushed (with rst) at sock lock release time*/
list_splice_init(&msk->conn_list, &conn_list);
- sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
+ mptcp_data_lock(sk);
+ mptcp_stop_timer(sk);
sk_stop_timer(sk, &sk->sk_timer);
+ mptcp_data_unlock(sk);
msk->pm.status = 0;
/* clears msk->subflow, allowing the following loop to close
@@ -2796,7 +2872,9 @@ cleanup:
__mptcp_destroy_sock(sk);
do_cancel_work = true;
} else {
+ mptcp_data_lock(sk);
sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
+ mptcp_data_unlock(sk);
}
release_sock(sk);
if (do_cancel_work)
@@ -2841,8 +2919,10 @@ static int mptcp_disconnect(struct sock *sk, int flags)
__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
}
- sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
+ mptcp_data_lock(sk);
+ mptcp_stop_timer(sk);
sk_stop_timer(sk, &sk->sk_timer);
+ mptcp_data_unlock(sk);
if (mptcp_sk(sk)->token)
mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
@@ -3017,6 +3097,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk)
msk->rmem_fwd_alloc = 0;
mptcp_token_destroy(msk);
mptcp_pm_free_anno_list(msk);
+ mptcp_free_local_addr_list(msk);
}
static void mptcp_destroy(struct sock *sk)
@@ -3092,15 +3173,19 @@ static void mptcp_release_cb(struct sock *sk)
spin_lock_bh(&sk->sk_lock.slock);
}
- /* be sure to set the current sk state before taking actions
- * depending on sk_state
- */
- if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags))
- __mptcp_set_connected(sk);
if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
__mptcp_clean_una_wakeup(sk);
- if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
- __mptcp_error_report(sk);
+ if (unlikely(&msk->cb_flags)) {
+ /* be sure to set the current sk state before taking actions
+ * depending on sk_state, that is processing MPTCP_ERROR_REPORT
+ */
+ if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags))
+ __mptcp_set_connected(sk);
+ if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
+ __mptcp_error_report(sk);
+ if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags))
+ msk->last_snd = NULL;
+ }
__mptcp_update_rmem(sk);
}
@@ -3237,15 +3322,12 @@ bool mptcp_finish_join(struct sock *ssk)
return false;
}
- if (!msk->pm.server_side)
+ if (!list_empty(&subflow->node))
goto out;
if (!mptcp_pm_allow_new_subflow(msk))
goto err_prohibited;
- if (WARN_ON_ONCE(!list_empty(&subflow->node)))
- goto err_prohibited;
-
/* active connections are already on conn_list.
* If we can't acquire msk socket lock here, let the release callback
* handle it
@@ -3271,6 +3353,7 @@ err_prohibited:
}
subflow->map_seq = READ_ONCE(msk->ack_seq);
+ WRITE_ONCE(msk->allow_infinite_fallback, false);
out:
mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3c1a3036550f..f542aeaa5b09 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -11,6 +11,7 @@
#include <net/tcp.h>
#include <net/inet_connection_sock.h>
#include <uapi/linux/mptcp.h>
+#include <net/genetlink.h>
#define MPTCP_SUPPORTED_VERSION 1
@@ -116,6 +117,7 @@
#define MPTCP_WORK_EOF 3
#define MPTCP_FALLBACK_DONE 4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_FAIL_NO_RESPONSE 6
/* MPTCP socket release cb flags */
#define MPTCP_PUSH_PENDING 1
@@ -124,6 +126,7 @@
#define MPTCP_RETRANSMIT 4
#define MPTCP_FLUSH_JOIN_LIST 5
#define MPTCP_CONNECTED 6
+#define MPTCP_RESET_SCHEDULER 7
static inline bool before64(__u64 seq1, __u64 seq2)
{
@@ -182,6 +185,14 @@ enum mptcp_pm_status {
*/
};
+enum mptcp_pm_type {
+ MPTCP_PM_TYPE_KERNEL = 0,
+ MPTCP_PM_TYPE_USERSPACE,
+
+ __MPTCP_PM_TYPE_NR,
+ __MPTCP_PM_TYPE_MAX = __MPTCP_PM_TYPE_NR - 1,
+};
+
/* Status bits below MPTCP_PM_ALREADY_ESTABLISHED need pm worker actions */
#define MPTCP_PM_WORK_MASK ((1 << MPTCP_PM_ALREADY_ESTABLISHED) - 1)
@@ -198,6 +209,7 @@ struct mptcp_pm_data {
struct mptcp_addr_info local;
struct mptcp_addr_info remote;
struct list_head anno_list;
+ struct list_head userspace_pm_local_addr_list;
spinlock_t lock; /*protects the whole PM data */
@@ -210,6 +222,7 @@ struct mptcp_pm_data {
u8 add_addr_signaled;
u8 add_addr_accepted;
u8 local_addr_used;
+ u8 pm_type;
u8 subflows;
u8 status;
DECLARE_BITMAP(id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
@@ -217,6 +230,14 @@ struct mptcp_pm_data {
struct mptcp_rm_list rm_list_rx;
};
+struct mptcp_pm_addr_entry {
+ struct list_head list;
+ struct mptcp_addr_info addr;
+ u8 flags;
+ int ifindex;
+ struct socket *lsk;
+};
+
struct mptcp_data_frag {
struct list_head list;
u64 data_seq;
@@ -262,6 +283,7 @@ struct mptcp_sock {
bool rcv_fastclose;
bool use_64bit_ack; /* Set when we received a 64-bit DSN */
bool csum_enabled;
+ bool allow_infinite_fallback;
u8 recvmsg_inq:1,
cork:1,
nodelay:1;
@@ -439,12 +461,14 @@ struct mptcp_subflow_context {
send_mp_prio : 1,
send_mp_fail : 1,
send_fastclose : 1,
+ send_infinite_map : 1,
rx_eof : 1,
can_ack : 1, /* only after processing the remote a key */
disposable : 1, /* ctx can be free at ulp release time */
stale : 1, /* unable to snd/rcv data, do not use for xmit */
local_id_valid : 1; /* local_id is correctly initialized */
enum mptcp_data_avail data_avail;
+ bool mp_fail_response_expect;
u32 remote_nonce;
u64 thmac;
u32 local_nonce;
@@ -571,6 +595,7 @@ unsigned int mptcp_get_add_addr_timeout(const struct net *net);
int mptcp_is_checksum_enabled(const struct net *net);
int mptcp_allow_join_id0(const struct net *net);
unsigned int mptcp_stale_loss_cnt(const struct net *net);
+int mptcp_get_pm_type(const struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool __mptcp_retransmit_pending_data(struct sock *sk);
@@ -586,6 +611,9 @@ void mptcp_subflow_reset(struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ const struct mptcp_addr_info *b, bool use_port);
+
/* called with sk socket lock held */
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
const struct mptcp_addr_info *remote);
@@ -728,6 +756,11 @@ u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
void __init mptcp_pm_init(void);
void mptcp_pm_data_init(struct mptcp_sock *msk);
void mptcp_pm_data_reset(struct mptcp_sock *msk);
+int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
+ struct mptcp_addr_info *addr);
+int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
+ bool require_family,
+ struct mptcp_pm_addr_entry *entry);
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
@@ -738,7 +771,7 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk);
bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk);
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
const struct mptcp_subflow_context *subflow);
-void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+void mptcp_pm_add_addr_received(const struct sock *ssk,
const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
@@ -748,6 +781,8 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup);
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq);
+bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ const struct mptcp_pm_addr_entry *entry);
void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
struct mptcp_pm_add_entry *
@@ -756,19 +791,34 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
-int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
+int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+ unsigned int id,
u8 *flags, int *ifindex);
+int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+ unsigned int id,
+ u8 *flags, int *ifindex);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list);
+
+int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *entry);
+void mptcp_free_local_addr_list(struct mptcp_sock *msk);
+int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info);
+int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info);
+int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info);
+int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info);
void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
const struct sock *ssk, gfp_t gfp);
-void mptcp_event_addr_announced(const struct mptcp_sock *msk, const struct mptcp_addr_info *info);
+void mptcp_event_addr_announced(const struct sock *ssk, const struct mptcp_addr_info *info);
void mptcp_event_addr_removed(const struct mptcp_sock *msk, u8 id);
+bool mptcp_userspace_pm_active(const struct mptcp_sock *msk);
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
@@ -791,6 +841,16 @@ static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_RM_ADDR_SIGNAL);
}
+static inline bool mptcp_pm_is_userspace(const struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.pm_type) == MPTCP_PM_TYPE_USERSPACE;
+}
+
+static inline bool mptcp_pm_is_kernel(const struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.pm_type) == MPTCP_PM_TYPE_KERNEL;
+}
+
static inline unsigned int mptcp_add_addr_len(int family, bool echo, bool port)
{
u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
@@ -821,9 +881,9 @@ bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
struct mptcp_rm_list *rm_list);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
void __init mptcp_pm_nl_init(void);
-void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
void mptcp_pm_nl_work(struct mptcp_sock *msk);
void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
@@ -875,13 +935,28 @@ static inline void mptcp_do_fallback(struct sock *sk)
#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
+static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
+{
+ struct mptcp_ext *mpext;
+
+ mpext = skb ? mptcp_get_ext(skb) : NULL;
+ if (mpext && mpext->infinite_map)
+ return true;
+
+ return false;
+}
+
+static inline bool is_active_ssk(struct mptcp_subflow_context *subflow)
+{
+ return (subflow->request_mptcp || subflow->request_join);
+}
+
static inline bool subflow_simultaneous_connect(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- struct sock *parent = subflow->conn;
return sk->sk_state == TCP_ESTABLISHED &&
- !mptcp_sk(parent)->pm.server_side &&
+ is_active_ssk(subflow) &&
!subflow->conn_finished;
}
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index f949d22f52bd..826b0c1dae98 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -853,15 +853,11 @@ out:
void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
{
- struct sock *sk = &msk->sk.icsk_inet.sk;
u32 flags = 0;
- bool slow;
u8 val;
memset(info, 0, sizeof(*info));
- slow = lock_sock_fast(sk);
-
info->mptcpi_subflows = READ_ONCE(msk->pm.subflows);
info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled);
info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted);
@@ -882,8 +878,6 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
info->mptcpi_snd_una = READ_ONCE(msk->snd_una);
info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq);
info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
-
- unlock_sock_fast(sk, slow);
}
EXPORT_SYMBOL_GPL(mptcp_diag_fill_info);
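
mptcp_diag_fill_info() now samples its counters without taking the msk socket lock, relying on the READ_ONCE() annotations on the fields involved; the same helper feeds the MPTCP_INFO getsockopt. A small sketch of that consumer side, with the SOL_MPTCP fallback value assumed for older libc headers and the field names taken from the hunk above:

/* Sketch: dump a few MPTCP_INFO counters for an MPTCP socket fd. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/mptcp.h>

#ifndef SOL_MPTCP
#define SOL_MPTCP 284	/* assumed fallback if the libc headers lack it */
#endif

static int dump_mptcp_info(int fd)
{
	struct mptcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len) < 0) {
		perror("getsockopt(MPTCP_INFO)");
		return -1;
	}

	printf("subflows=%u add_addr_signal=%u csum_enabled=%u\n",
	       info.mptcpi_subflows, info.mptcpi_add_addr_signal,
	       info.mptcpi_csum_enabled);
	return 0;
}

Here fd is expected to be a socket created with socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP).
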
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index aba260f547da..6d59336a8e1e 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -62,7 +62,9 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
return mptcp_is_fully_established((void *)msk) &&
- READ_ONCE(msk->pm.accept_subflow);
+ ((mptcp_pm_is_userspace(msk) &&
+ mptcp_userspace_pm_active(msk)) ||
+ READ_ONCE(msk->pm.accept_subflow));
}
/* validate received token and create truncated hmac and nonce for SYN-ACK */
@@ -441,6 +443,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->backup = mp_opt.backup;
subflow->thmac = mp_opt.thmac;
subflow->remote_nonce = mp_opt.nonce;
+ subflow->remote_id = mp_opt.join_id;
pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
subflow, subflow->thmac, subflow->remote_nonce,
subflow->backup);
@@ -968,6 +971,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
bool csum_reqd = READ_ONCE(msk->csum_enabled);
+ struct sock *sk = (struct sock *)msk;
struct mptcp_ext *mpext;
struct sk_buff *skb;
u16 data_len;
@@ -1006,7 +1010,15 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_len = mpext->data_len;
if (data_len == 0) {
+ pr_debug("infinite mapping received");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+ subflow->map_data_len = 0;
+ if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
+ mptcp_data_lock(sk);
+ if (inet_sk_state_load(sk) != TCP_CLOSE)
+ sk_stop_timer(sk, &sk->sk_timer);
+ mptcp_data_unlock(sk);
+ }
return MAPPING_INVALID;
}
@@ -1203,35 +1215,45 @@ no_data:
return false;
fallback:
- /* RFC 8684 section 3.7. */
- if (subflow->send_mp_fail) {
- if (mptcp_has_another_subflow(ssk)) {
- while ((skb = skb_peek(&ssk->sk_receive_queue)))
- sk_eat_skb(ssk, skb);
+ if (!__mptcp_check_fallback(msk)) {
+ /* RFC 8684 section 3.7. */
+ if (subflow->send_mp_fail) {
+ if (mptcp_has_another_subflow(ssk) ||
+ !READ_ONCE(msk->allow_infinite_fallback)) {
+ ssk->sk_err = EBADMSG;
+ tcp_set_state(ssk, TCP_CLOSE);
+ subflow->reset_transient = 0;
+ subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ while ((skb = skb_peek(&ssk->sk_receive_queue)))
+ sk_eat_skb(ssk, skb);
+ } else {
+ WRITE_ONCE(subflow->mp_fail_response_expect, true);
+ /* The data lock is acquired in __mptcp_move_skbs() */
+ sk_reset_timer((struct sock *)msk,
+ &((struct sock *)msk)->sk_timer,
+ jiffies + TCP_RTO_MAX);
+ }
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+ return true;
}
- ssk->sk_err = EBADMSG;
- tcp_set_state(ssk, TCP_CLOSE);
- subflow->reset_transient = 0;
- subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
- tcp_send_active_reset(ssk, GFP_ATOMIC);
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
- return true;
- }
- if (subflow->mp_join || subflow->fully_established) {
- /* fatal protocol error, close the socket.
- * subflow_error_report() will introduce the appropriate barriers
- */
- ssk->sk_err = EBADMSG;
- tcp_set_state(ssk, TCP_CLOSE);
- subflow->reset_transient = 0;
- subflow->reset_reason = MPTCP_RST_EMPTCP;
- tcp_send_active_reset(ssk, GFP_ATOMIC);
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
- return false;
+ if ((subflow->mp_join || subflow->fully_established) && subflow->map_data_len) {
+ /* fatal protocol error, close the socket.
+ * subflow_error_report() will introduce the appropriate barriers
+ */
+ ssk->sk_err = EBADMSG;
+ tcp_set_state(ssk, TCP_CLOSE);
+ subflow->reset_transient = 0;
+ subflow->reset_reason = MPTCP_RST_EMPTCP;
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+ return false;
+ }
+
+ __mptcp_do_fallback(msk);
}
- __mptcp_do_fallback(msk);
skb = skb_peek(&ssk->sk_receive_queue);
subflow->map_valid = 1;
subflow->map_seq = READ_ONCE(msk->ack_seq);
@@ -1446,7 +1468,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
if (local_id)
subflow_set_local_id(subflow, local_id);
- mptcp_pm_get_flags_and_ifindex_by_id(sock_net(sk), local_id,
+ mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
&flags, &ifindex);
subflow->remote_key = msk->remote_key;
subflow->local_key = msk->local_key;
@@ -1483,6 +1505,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
/* discard the subflow socket */
mptcp_sock_graft(ssk, sk->sk_socket);
iput(SOCK_INODE(sf));
+ WRITE_ONCE(msk->allow_infinite_fallback, false);
return err;
failed_unlink:
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 7f645328b47f..efab2b06d373 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1767,8 +1767,6 @@ static int ip_vs_zero_all(struct netns_ipvs *ipvs)
#ifdef CONFIG_SYSCTL
-static int three = 3;
-
static int
proc_do_defense_mode(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
@@ -1977,7 +1975,7 @@ static struct ctl_table vs_vars[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &three,
+ .extra2 = SYSCTL_THREE,
},
{
.procname = "nat_icmp_send",
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index fe98673dd5ac..bc4d5cd63a94 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -38,6 +38,7 @@
* @l4proto - Layer 4 protocol
* Values:
* IPPROTO_TCP, IPPROTO_UDP
+ * @dir - connection tracking tuple direction
* @reserved - Reserved member, will be reused for more options in future
* Values:
* 0
@@ -46,7 +47,8 @@ struct bpf_ct_opts {
s32 netns_id;
s32 error;
u8 l4proto;
- u8 reserved[3];
+ u8 dir;
+ u8 reserved[2];
};
enum {
@@ -56,10 +58,11 @@ enum {
static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
struct bpf_sock_tuple *bpf_tuple,
u32 tuple_len, u8 protonum,
- s32 netns_id)
+ s32 netns_id, u8 *dir)
{
struct nf_conntrack_tuple_hash *hash;
struct nf_conntrack_tuple tuple;
+ struct nf_conn *ct;
if (unlikely(protonum != IPPROTO_TCP && protonum != IPPROTO_UDP))
return ERR_PTR(-EPROTO);
@@ -99,7 +102,12 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
put_net(net);
if (!hash)
return ERR_PTR(-ENOENT);
- return nf_ct_tuplehash_to_ctrack(hash);
+
+ ct = nf_ct_tuplehash_to_ctrack(hash);
+ if (dir)
+ *dir = NF_CT_DIRECTION(hash);
+
+ return ct;
}
__diag_push();
@@ -135,13 +143,13 @@ bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
if (!opts)
return NULL;
if (!bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
- opts->reserved[2] || opts__sz != NF_BPF_CT_OPTS_SZ) {
+ opts__sz != NF_BPF_CT_OPTS_SZ) {
opts->error = -EINVAL;
return NULL;
}
caller_net = dev_net(ctx->rxq->dev);
nfct = __bpf_nf_ct_lookup(caller_net, bpf_tuple, tuple__sz, opts->l4proto,
- opts->netns_id);
+ opts->netns_id, &opts->dir);
if (IS_ERR(nfct)) {
opts->error = PTR_ERR(nfct);
return NULL;
@@ -178,13 +186,13 @@ bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
if (!opts)
return NULL;
if (!bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
- opts->reserved[2] || opts__sz != NF_BPF_CT_OPTS_SZ) {
+ opts__sz != NF_BPF_CT_OPTS_SZ) {
opts->error = -EINVAL;
return NULL;
}
caller_net = skb->dev ? dev_net(skb->dev) : sock_net(skb->sk);
nfct = __bpf_nf_ct_lookup(caller_net, bpf_tuple, tuple__sz, opts->l4proto,
- opts->netns_id);
+ opts->netns_id, &opts->dir);
if (IS_ERR(nfct)) {
opts->error = PTR_ERR(nfct);
return NULL;
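
The new dir output lets a BPF program see which direction of the conntrack tuple matched the lookup (NF_CT_DIRECTION() of the tuple hash). A minimal XDP sketch, loosely modeled on the conntrack kfunc selftests; it assumes a vmlinux.h that carries the nf_conntrack types, declares the kfuncs locally, and mirrors the opts layout by hand so its size still matches NF_BPF_CT_OPTS_SZ:

// SPDX-License-Identifier: GPL-2.0
/* Sketch only, not part of the patch. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* local mirror of struct bpf_ct_opts; sizeof() must stay 12 bytes */
struct bpf_ct_opts___local {
	__s32 netns_id;
	__s32 error;
	__u8 l4proto;
	__u8 dir;
	__u8 reserved[2];
};

struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
				  struct bpf_sock_tuple *bpf_tuple,
				  __u32 tuple__sz,
				  struct bpf_ct_opts___local *opts,
				  __u32 opts__sz) __ksym;
void bpf_ct_release(struct nf_conn *ct) __ksym;

SEC("xdp")
int ct_dir_demo(struct xdp_md *ctx)
{
	struct bpf_ct_opts___local opts = {
		.netns_id = -1,		/* current netns */
		.l4proto = 6,		/* IPPROTO_TCP */
	};
	struct bpf_sock_tuple tup = {};
	struct nf_conn *ct;

	/* a real program would parse the packet and fill tup.ipv4 here */
	ct = bpf_xdp_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
	if (ct) {
		/* opts.dir now carries the matching tuple direction:
		 * 0 == IP_CT_DIR_ORIGINAL, 1 == IP_CT_DIR_REPLY
		 */
		bpf_ct_release(ct);
	}
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";

The opts__sz check above rejects any other size, which is why the mirrored struct keeps the two remaining reserved bytes (and they must stay zero).
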
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 07e65b4e92f8..0cb2da0a759a 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -96,8 +96,8 @@ static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
static void ecache_work(struct work_struct *work)
{
- struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache_dwork.work);
- struct netns_ct *ctnet = cnet->ct_net;
+ struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache.dwork.work);
+ struct netns_ct *ctnet = cnet->ecache.ct_net;
int cpu, delay = -1;
struct ct_pcpu *pcpu;
@@ -127,7 +127,7 @@ static void ecache_work(struct work_struct *work)
ctnet->ecache_dwork_pending = delay > 0;
if (delay >= 0)
- schedule_delayed_work(&cnet->ecache_dwork, delay);
+ schedule_delayed_work(&cnet->ecache.dwork, delay);
}
static int __nf_conntrack_eventmask_report(struct nf_conntrack_ecache *e,
@@ -293,12 +293,12 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
if (state == NFCT_ECACHE_DESTROY_FAIL &&
- !delayed_work_pending(&cnet->ecache_dwork)) {
- schedule_delayed_work(&cnet->ecache_dwork, HZ);
+ !delayed_work_pending(&cnet->ecache.dwork)) {
+ schedule_delayed_work(&cnet->ecache.dwork, HZ);
net->ct.ecache_dwork_pending = true;
} else if (state == NFCT_ECACHE_DESTROY_SENT) {
net->ct.ecache_dwork_pending = false;
- mod_delayed_work(system_wq, &cnet->ecache_dwork, 0);
+ mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
}
}
@@ -310,8 +310,9 @@ void nf_conntrack_ecache_pernet_init(struct net *net)
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
net->ct.sysctl_events = nf_ct_events;
- cnet->ct_net = &net->ct;
- INIT_DELAYED_WORK(&cnet->ecache_dwork, ecache_work);
+
+ cnet->ecache.ct_net = &net->ct;
+ INIT_DELAYED_WORK(&cnet->ecache.dwork, ecache_work);
BUILD_BUG_ON(__IPCT_MAX >= 16); /* e->ctmask is u16 */
}
@@ -320,5 +321,5 @@ void nf_conntrack_ecache_pernet_fini(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
- cancel_delayed_work_sync(&cnet->ecache_dwork);
+ cancel_delayed_work_sync(&cnet->ecache.dwork);
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1ea2ad732d57..924d766e6c53 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1708,6 +1708,47 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
return 0;
}
+static int ctnetlink_dump_one_entry(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct nf_conn *ct,
+ bool dying)
+{
+ struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
+ struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ u8 l3proto = nfmsg->nfgen_family;
+ int res;
+
+ if (l3proto && nf_ct_l3num(ct) != l3proto)
+ return 0;
+
+ if (ctx->last) {
+ if (ct != ctx->last)
+ return 0;
+
+ ctx->last = NULL;
+ }
+
+ /* We can't dump extension info for the unconfirmed
+ * list because unconfirmed conntracks can have
+ * ct->ext reallocated (and thus freed).
+ *
+ * In the dying list case ct->ext can't be free'd
+ * until after we drop pcpu->lock.
+ */
+ res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ ct, dying, 0);
+ if (res < 0) {
+ if (!refcount_inc_not_zero(&ct->ct_general.use))
+ return 0;
+
+ ctx->last = ct;
+ }
+
+ return res;
+}
+
static int
ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
{
@@ -1715,12 +1756,9 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
- u_int8_t l3proto = nfmsg->nfgen_family;
- int res;
- int cpu;
struct hlist_nulls_head *list;
struct net *net = sock_net(skb->sk);
+ int res, cpu;
if (ctx->done)
return 0;
@@ -1739,30 +1777,10 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
restart:
hlist_nulls_for_each_entry(h, n, list, hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
- if (l3proto && nf_ct_l3num(ct) != l3proto)
- continue;
- if (ctx->last) {
- if (ct != last)
- continue;
- ctx->last = NULL;
- }
- /* We can't dump extension info for the unconfirmed
- * list because unconfirmed conntracks can have
- * ct->ext reallocated (and thus freed).
- *
- * In the dying list case ct->ext can't be free'd
- * until after we drop pcpu->lock.
- */
- res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
- ct, dying, 0);
+ res = ctnetlink_dump_one_entry(skb, cb, ct, dying);
if (res < 0) {
- if (!refcount_inc_not_zero(&ct->ct_general.use))
- continue;
ctx->cpu = cpu;
- ctx->last = ct;
spin_unlock_bh(&pcpu->lock);
goto out;
}
diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c
index 13234641cdb3..77bcb10fc586 100644
--- a/net/netfilter/nf_log_syslog.c
+++ b/net/netfilter/nf_log_syslog.c
@@ -40,6 +40,12 @@ struct arppayload {
unsigned char ip_dst[4];
};
+/* Guard against containers flooding syslog. */
+static bool nf_log_allowed(const struct net *net)
+{
+ return net_eq(net, &init_net) || sysctl_nf_log_all_netns;
+}
+
static void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
{
u16 vid;
@@ -133,8 +139,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
{
struct nf_log_buf *m;
- /* FIXME: Disabled from containers until syslog ns is supported */
- if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
+ if (!nf_log_allowed(net))
return;
m = nf_log_buf_open();
@@ -766,9 +771,9 @@ dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
}
-static void dump_ipv4_mac_header(struct nf_log_buf *m,
- const struct nf_loginfo *info,
- const struct sk_buff *skb)
+static void dump_mac_header(struct nf_log_buf *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
@@ -798,9 +803,26 @@ fallback:
const unsigned char *p = skb_mac_header(skb);
unsigned int i;
- nf_log_buf_add(m, "%02x", *p++);
- for (i = 1; i < dev->hard_header_len; i++, p++)
- nf_log_buf_add(m, ":%02x", *p);
+ if (dev->type == ARPHRD_SIT) {
+ p -= ETH_HLEN;
+
+ if (p < skb->head)
+ p = NULL;
+ }
+
+ if (p) {
+ nf_log_buf_add(m, "%02x", *p++);
+ for (i = 1; i < dev->hard_header_len; i++)
+ nf_log_buf_add(m, ":%02x", *p++);
+ }
+
+ if (dev->type == ARPHRD_SIT) {
+ const struct iphdr *iph =
+ (struct iphdr *)skb_mac_header(skb);
+
+ nf_log_buf_add(m, " TUNNEL=%pI4->%pI4", &iph->saddr,
+ &iph->daddr);
+ }
}
nf_log_buf_add(m, " ");
}
@@ -814,8 +836,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
{
struct nf_log_buf *m;
- /* FIXME: Disabled from containers until syslog ns is supported */
- if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
+ if (!nf_log_allowed(net))
return;
m = nf_log_buf_open();
@@ -827,7 +848,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
out, loginfo, prefix);
if (in)
- dump_ipv4_mac_header(m, loginfo, skb);
+ dump_mac_header(m, loginfo, skb);
dump_ipv4_packet(net, m, loginfo, skb, 0);
@@ -841,64 +862,6 @@ static struct nf_logger nf_ip_logger __read_mostly = {
.me = THIS_MODULE,
};
-static void dump_ipv6_mac_header(struct nf_log_buf *m,
- const struct nf_loginfo *info,
- const struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- unsigned int logflags = 0;
-
- if (info->type == NF_LOG_TYPE_LOG)
- logflags = info->u.log.logflags;
-
- if (!(logflags & NF_LOG_MACDECODE))
- goto fallback;
-
- switch (dev->type) {
- case ARPHRD_ETHER:
- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
- nf_log_dump_vlan(m, skb);
- nf_log_buf_add(m, "MACPROTO=%04x ",
- ntohs(eth_hdr(skb)->h_proto));
- return;
- default:
- break;
- }
-
-fallback:
- nf_log_buf_add(m, "MAC=");
- if (dev->hard_header_len &&
- skb->mac_header != skb->network_header) {
- const unsigned char *p = skb_mac_header(skb);
- unsigned int len = dev->hard_header_len;
- unsigned int i;
-
- if (dev->type == ARPHRD_SIT) {
- p -= ETH_HLEN;
-
- if (p < skb->head)
- p = NULL;
- }
-
- if (p) {
- nf_log_buf_add(m, "%02x", *p++);
- for (i = 1; i < len; i++)
- nf_log_buf_add(m, ":%02x", *p++);
- }
- nf_log_buf_add(m, " ");
-
- if (dev->type == ARPHRD_SIT) {
- const struct iphdr *iph =
- (struct iphdr *)skb_mac_header(skb);
- nf_log_buf_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
- &iph->daddr);
- }
- } else {
- nf_log_buf_add(m, " ");
- }
-}
-
static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
@@ -908,8 +871,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
{
struct nf_log_buf *m;
- /* FIXME: Disabled from containers until syslog ns is supported */
- if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
+ if (!nf_log_allowed(net))
return;
m = nf_log_buf_open();
@@ -921,7 +883,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
loginfo, prefix);
if (in)
- dump_ipv6_mac_header(m, loginfo, skb);
+ dump_mac_header(m, loginfo, skb);
dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);
@@ -935,6 +897,32 @@ static struct nf_logger nf_ip6_logger __read_mostly = {
.me = THIS_MODULE,
};
+static void nf_log_unknown_packet(struct net *net, u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
+{
+ struct nf_log_buf *m;
+
+ if (!nf_log_allowed(net))
+ return;
+
+ m = nf_log_buf_open();
+
+ if (!loginfo)
+ loginfo = &default_loginfo;
+
+ nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
+ prefix);
+
+ dump_mac_header(m, loginfo, skb);
+
+ nf_log_buf_close(m);
+}
+
static void nf_log_netdev_packet(struct net *net, u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
@@ -954,6 +942,10 @@ static void nf_log_netdev_packet(struct net *net, u_int8_t pf,
case htons(ETH_P_RARP):
nf_log_arp_packet(net, pf, hooknum, skb, in, out, loginfo, prefix);
break;
+ default:
+ nf_log_unknown_packet(net, pf, hooknum, skb,
+ in, out, loginfo, prefix);
+ break;
}
}
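
The nf_log_syslog.c changes fold the IPv4 and IPv6 MAC dumpers into one dump_mac_header() (which also handles the ARPHRD_SIT case by stepping back ETH_HLEN and appending a TUNNEL=src->dst note) and route unknown netdev-family protocols through nf_log_unknown_packet(). As a small aid, here is a self-contained sketch of just the colon-separated hex fallback that the unified helper emits when MAC decoding is not requested; it is not the kernel function.

/* Illustrative sketch of the "MAC=aa:bb:cc:..." fallback format. */
#include <stdio.h>

static void print_mac_fallback(const unsigned char *p, unsigned int hard_header_len)
{
        unsigned int i;

        if (!hard_header_len)
                return;

        printf("MAC=%02x", p[0]);
        for (i = 1; i < hard_header_len; i++)
                printf(":%02x", p[i]);
        printf(" ");
}

int main(void)
{
        const unsigned char hdr[14] = {
                0x00, 0x11, 0x22, 0x33, 0x44, 0x55,   /* dst MAC */
                0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,   /* src MAC */
                0x08, 0x00                            /* EtherType */
        };

        print_mac_fallback(hdr, sizeof(hdr));  /* MAC=00:11:...:08:00 */
        printf("\n");
        return 0;
}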
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 16c3a39689f4..f3ad02a399f8 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8367,10 +8367,8 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
if (chain->blob_next || !nft_is_active_next(net, chain))
return 0;
- rule = list_entry(&chain->rules, struct nft_rule, list);
-
data_size = 0;
- list_for_each_entry_continue(rule, &chain->rules, list) {
+ list_for_each_entry(rule, &chain->rules, list) {
if (nft_is_active_next(net, rule)) {
data_size += sizeof(*prule) + rule->dlen;
if (data_size > INT_MAX)
@@ -8387,7 +8385,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
data_boundary = data + data_size;
size = 0;
- list_for_each_entry_continue(rule, &chain->rules, list) {
+ list_for_each_entry(rule, &chain->rules, list) {
if (!nft_is_active_next(net, rule))
continue;
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index b0d8888a539b..eea486f32971 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -158,6 +158,7 @@ static int cttimeout_new_timeout(struct sk_buff *skb,
timeout->timeout.l3num = l3num;
timeout->timeout.l4proto = l4proto;
refcount_set(&timeout->refcnt, 1);
+ __module_get(THIS_MODULE);
list_add_tail_rcu(&timeout->head, &pernet->nfct_timeout_list);
return 0;
@@ -506,13 +507,8 @@ static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
- if (!try_module_get(THIS_MODULE))
+ if (!refcount_inc_not_zero(&timeout->refcnt))
goto err;
-
- if (!refcount_inc_not_zero(&timeout->refcnt)) {
- module_put(THIS_MODULE);
- goto err;
- }
matching = timeout;
break;
}
@@ -525,10 +521,10 @@ static void ctnl_timeout_put(struct nf_ct_timeout *t)
struct ctnl_timeout *timeout =
container_of(t, struct ctnl_timeout, timeout);
- if (refcount_dec_and_test(&timeout->refcnt))
+ if (refcount_dec_and_test(&timeout->refcnt)) {
kfree_rcu(timeout, rcu_head);
-
- module_put(THIS_MODULE);
+ module_put(THIS_MODULE);
+ }
}
static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
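
The cttimeout change ties the module reference to object lifetime instead of to each lookup: __module_get() when a timeout policy is created, module_put() only when the last refcount_dec_and_test() frees it, so ctnl_timeout_find_get() just bumps the object refcount. A minimal sketch of that pattern, with plain counters standing in for refcount_t and the module refcount (not kernel code):

/* Sketch: pin a long-lived reference at object creation and drop it with
 * the final object reference, instead of per-lookup get/put. */
#include <stdio.h>
#include <stdlib.h>

static int module_refs;                   /* stand-in for THIS_MODULE's refcount */

struct timeout_obj {
        int refcnt;
        const char *name;
};

static struct timeout_obj *timeout_create(const char *name)
{
        struct timeout_obj *t = malloc(sizeof(*t));

        t->refcnt = 1;
        t->name = name;
        module_refs++;                    /* __module_get() at creation */
        return t;
}

static struct timeout_obj *timeout_find_get(struct timeout_obj *t)
{
        t->refcnt++;                      /* lookup only touches the object */
        return t;
}

static void timeout_put(struct timeout_obj *t)
{
        if (--t->refcnt == 0) {
                free(t);
                module_refs--;            /* module_put() with the last ref */
        }
}

int main(void)
{
        struct timeout_obj *t = timeout_create("tcp-policy");

        timeout_put(timeout_find_get(t)); /* a user comes and goes */
        timeout_put(t);                   /* creator drops the last reference */
        printf("module refs left: %d\n", module_refs); /* 0 */
        return 0;
}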
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index f590ee1c8a1b..83590afe3768 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -30,7 +30,7 @@ static void nft_bitwise_eval_bool(u32 *dst, const u32 *src,
{
unsigned int i;
- for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++)
+ for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++)
dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i];
}
@@ -109,22 +109,23 @@ static int nft_bitwise_init_bool(struct nft_bitwise *priv,
return err;
if (mask.type != NFT_DATA_VALUE || mask.len != priv->len) {
err = -EINVAL;
- goto err1;
+ goto err_mask_release;
}
err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &xor,
tb[NFTA_BITWISE_XOR]);
if (err < 0)
- goto err1;
+ goto err_mask_release;
if (xor.type != NFT_DATA_VALUE || xor.len != priv->len) {
err = -EINVAL;
- goto err2;
+ goto err_xor_release;
}
return 0;
-err2:
+
+err_xor_release:
nft_data_release(&priv->xor, xor.type);
-err1:
+err_mask_release:
nft_data_release(&priv->mask, mask.type);
return err;
}
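
The nft_bitwise tweak replaces the literal 4 with sizeof(u32): the boolean variant walks priv->len bytes one 32-bit register word at a time, so the loop count is the byte length rounded up to whole words. A minimal sketch of that word loop, with DIV_ROUND_UP defined locally (in the kernel it comes from the math headers):

/* Illustrative sketch of the masked-XOR word loop in nft_bitwise_eval_bool(). */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void bitwise_eval_bool(uint32_t *dst, const uint32_t *src,
                              const uint32_t *mask, const uint32_t *xor_data,
                              unsigned int len /* in bytes */)
{
        unsigned int i;

        /* len bytes, processed one 32-bit word at a time */
        for (i = 0; i < DIV_ROUND_UP(len, sizeof(uint32_t)); i++)
                dst[i] = (src[i] & mask[i]) ^ xor_data[i];
}

int main(void)
{
        uint32_t src[2]      = { 0xdeadbeef, 0x12345678 };
        uint32_t mask[2]     = { 0x00ff00ff, 0xffffffff };
        uint32_t xor_data[2] = { 0x0f0f0f0f, 0x00000000 };
        uint32_t dst[2]      = { 0, 0 };

        bitwise_eval_bool(dst, src, mask, xor_data, 6); /* 6 bytes -> 2 words */
        printf("%08x %08x\n", dst[0], dst[1]);
        return 0;
}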
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index f198f2d9ef90..1f12d7ade606 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -35,6 +35,10 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
hooks = (1 << NF_INET_PRE_ROUTING);
+ if (priv->flags & NFTA_FIB_F_IIF) {
+ hooks |= (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD);
+ }
break;
case NFT_FIB_RESULT_ADDRTYPE:
if (priv->flags & NFTA_FIB_F_IIF)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 05a3795eac8e..1b5a9c2e1c29 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1931,7 +1931,6 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int noblock = flags & MSG_DONTWAIT;
size_t copied;
struct sk_buff *skb, *data_skb;
int err, ret;
@@ -1941,7 +1940,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
copied = 0;
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (skb == NULL)
goto out;
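
Several hunks in this series (netlink, netrom, nfc, packet, phonet, qrtr, rose, sctp, sunrpc) drop the separate noblock argument: "don't block" is now conveyed purely by MSG_DONTWAIT in flags. A hedged sketch of how the receive timeout is derived under the new convention (the helper name below is a stand-in, not the kernel implementation):

/* Sketch of the calling-convention change: the datagram helpers now derive
 * non-blocking behaviour from MSG_DONTWAIT instead of a separate int. */
#include <stdio.h>
#include <sys/socket.h>   /* MSG_DONTWAIT */

#define SK_RCVTIMEO 1000000L   /* arbitrary example timeout */

/* analogous to sock_rcvtimeo(sk, noblock) */
static long rcvtimeo(long sk_rcvtimeo, int noblock)
{
        return noblock ? 0 : sk_rcvtimeo;
}

int main(void)
{
        int flags = MSG_DONTWAIT;

        /* old style: skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
         *                              flags & MSG_DONTWAIT, &err);
         * new style: skb_recv_datagram(sk, flags, &err);             */
        printf("timeo=%ld\n", rcvtimeo(SK_RCVTIMEO, flags & MSG_DONTWAIT));
        return 0;
}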
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fa9dc2ba3941..6f7f4392cffb 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1159,7 +1159,8 @@ static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
/* Now we can treat all alike */
- if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
+ skb = skb_recv_datagram(sk, flags, &er);
+ if (!skb) {
release_sock(sk);
return er;
}
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 5b286e1e0a6f..6ff3e10ff8e3 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1166,6 +1166,7 @@ void nfc_unregister_device(struct nfc_dev *dev)
if (dev->rfkill) {
rfkill_unregister(dev->rfkill);
rfkill_destroy(dev->rfkill);
+ dev->rfkill = NULL;
}
dev->shutting_down = true;
device_unlock(&dev->dev);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 4ca35791c93b..77642d18a3b4 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -821,7 +821,6 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg,
static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
- int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
unsigned int copied, rlen;
struct sk_buff *skb, *cskb;
@@ -842,7 +841,7 @@ static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
pr_err("Recv datagram failed state %d %d %d",
sk->sk_state, err, sock_error(sk));
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 0ca214ab5aef..8dd569765f96 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -238,7 +238,6 @@ static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
- int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
@@ -246,7 +245,7 @@ static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
- skb = skb_recv_datagram(sk, flags, noblock, &rc);
+ skb = skb_recv_datagram(sk, flags, &rc);
if (!skb)
return rc;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 002d2b9c69dd..677f9cfa9660 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1924,12 +1924,20 @@ oom:
static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
+ int depth;
+
if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
sock->type == SOCK_RAW) {
skb_reset_mac_header(skb);
skb->protocol = dev_parse_header_protocol(skb);
}
+ /* Move network header to the right position for VLAN tagged packets */
+ if (likely(skb->dev->type == ARPHRD_ETHER) &&
+ eth_type_vlan(skb->protocol) &&
+ __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
skb_probe_transport_header(skb);
}
@@ -3047,6 +3055,11 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->mark = sockc.mark;
skb->tstamp = sockc.transmit_time;
+ if (unlikely(extra_len == 4))
+ skb->no_fcs = 1;
+
+ packet_parse_headers(skb, sock);
+
if (has_vnet_hdr) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
if (err)
@@ -3055,11 +3068,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
- packet_parse_headers(skb, sock);
-
- if (unlikely(extra_len == 4))
- skb->no_fcs = 1;
-
err = po->xmit(skb);
if (unlikely(err != 0)) {
if (err > 0)
@@ -3426,7 +3434,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
* but then it will block.
*/
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
/*
* An error occurred so return it. Because skb_recv_datagram()
@@ -3469,7 +3477,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sll->sll_protocol = skb->protocol;
}
- sock_recv_ts_and_drops(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
const size_t max_len = min(sizeof(skb->cb),
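
The af_packet change uses __vlan_get_protocol() to find where the VLAN tags end so the network header can be set past them on SOCK_RAW transmits. The walk itself is just "skip 802.1Q/802.1ad tags and report the resulting offset"; here is a self-contained sketch over a raw Ethernet frame, with the constants defined locally (this mirrors the idea, not the kernel helper's exact interface):

/* Illustrative sketch of skipping VLAN tags to find the network header
 * offset, the job __vlan_get_protocol() does for packet_parse_headers(). */
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN        14
#define VLAN_HLEN       4
#define ETH_P_8021Q     0x8100
#define ETH_P_8021AD    0x88a8

static uint16_t get_be16(const uint8_t *p)
{
        return (uint16_t)(p[0] << 8 | p[1]);
}

/* Returns the offset of the network header and reports the inner protocol. */
static int vlan_header_depth(const uint8_t *frame, size_t len, uint16_t *proto)
{
        size_t off = ETH_HLEN;
        uint16_t type = get_be16(frame + 12);

        while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
                if (off + VLAN_HLEN > len)
                        return -1;
                type = get_be16(frame + off + 2); /* encapsulated proto */
                off += VLAN_HLEN;
        }
        *proto = type;
        return (int)off;
}

int main(void)
{
        uint8_t frame[64] = { 0 };
        uint16_t proto;
        int depth;

        /* MACs left zeroed; outer 802.1ad tag, inner 802.1Q tag, IPv4 inside */
        frame[12] = 0x88; frame[13] = 0xa8;   /* 802.1ad */
        frame[16] = 0x81; frame[17] = 0x00;   /* 802.1Q  */
        frame[20] = 0x08; frame[21] = 0x00;   /* IPv4    */

        depth = vlan_header_depth(frame, sizeof(frame), &proto);
        printf("network header at %d, proto 0x%04x\n", depth, proto); /* 22, 0x0800 */
        return 0;
}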
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 393e6aa7a592..ff5f49ab236e 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -112,7 +112,7 @@ static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct sk_buff *skb = NULL;
struct sockaddr_pn sa;
@@ -123,7 +123,7 @@ static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
MSG_CMSG_COMPAT))
goto out_nofree;
- skb = skb_recv_datagram(sk, flags, noblock, &rval);
+ skb = skb_recv_datagram(sk, flags, &rval);
if (skb == NULL)
goto out_nofree;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 65d463ad8770..83ea13a50690 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -772,7 +772,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
u8 pipe_handle, enabled, n_sb;
u8 aligned = 0;
- skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
+ skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+ errp);
if (!skb)
return NULL;
@@ -1238,7 +1239,7 @@ struct sk_buff *pep_read(struct sock *sk)
}
static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct sk_buff *skb;
int err;
@@ -1267,7 +1268,7 @@ static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
return -EINVAL;
}
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
lock_sock(sk);
if (skb == NULL) {
if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
index ec2322529727..5c2fb992803b 100644
--- a/net/qrtr/af_qrtr.c
+++ b/net/qrtr/af_qrtr.c
@@ -1035,8 +1035,7 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
return -EADDRNOTAVAIL;
}
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &rc);
+ skb = skb_recv_datagram(sk, flags, &rc);
if (!skb) {
release_sock(sk);
return rc;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 30a1cf4c16c6..bf2d986a6bc3 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1230,7 +1230,8 @@ static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
return -ENOTCONN;
/* Now we can treat all alike */
- if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
+ skb = skb_recv_datagram(sk, flags, &er);
+ if (!skb)
return er;
qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 4f51094da9da..da9733da9868 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -195,7 +195,7 @@ static int offload_action_init(struct flow_offload_action *fl_action,
if (act->ops->offload_act_setup) {
spin_lock_bh(&act->tcfa_lock);
err = act->ops->offload_act_setup(act, fl_action, NULL,
- false);
+ false, extack);
spin_unlock_bh(&act->tcfa_lock);
return err;
}
@@ -271,7 +271,7 @@ static int tcf_action_offload_add_ex(struct tc_action *action,
if (err)
goto fl_err;
- err = tc_setup_action(&fl_action->action, actions);
+ err = tc_setup_action(&fl_action->action, actions, extack);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to setup tc actions for offload");
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index e0f515b774ca..22847ee009ef 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -696,7 +696,8 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
}
static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b1f502fce595..8af9d6e5ba61 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1584,7 +1584,8 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
}
static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index bde6a6c01e64..ac29d1065232 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -253,7 +253,8 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
}
static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@ -267,7 +268,17 @@ static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data,
} else if (is_tcf_gact_goto_chain(act)) {
entry->id = FLOW_ACTION_GOTO;
entry->chain_index = tcf_gact_goto_chain_index(act);
+ } else if (is_tcf_gact_continue(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload of \"continue\" action is not supported");
+ return -EOPNOTSUPP;
+ } else if (is_tcf_gact_reclassify(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload of \"reclassify\" action is not supported");
+ return -EOPNOTSUPP;
+ } else if (is_tcf_gact_pipe(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload of \"pipe\" action is not supported");
+ return -EOPNOTSUPP;
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported generic action offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index d56e73843a4b..fd5155274733 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -619,7 +619,8 @@ static int tcf_gate_get_entries(struct flow_action_entry *entry,
}
static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
int err;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 39acd1d18609..ebb92fb072ab 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -460,7 +460,8 @@ static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
}
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@ -478,6 +479,7 @@ static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
entry->id = FLOW_ACTION_MIRRED_INGRESS;
tcf_offload_mirred_get_dev(entry, act);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index b9ff3459fdab..adabeccb63e1 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -385,7 +385,8 @@ static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index)
}
static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@ -410,7 +411,14 @@ static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
entry->mpls_mangle.bos = tcf_mpls_bos(act);
entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
break;
+ case TCA_MPLS_ACT_DEC_TTL:
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"dec_ttl\" option is used");
+ return -EOPNOTSUPP;
+ case TCA_MPLS_ACT_MAC_PUSH:
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"mac_push\" option is used");
+ return -EOPNOTSUPP;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported MPLS mode offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 31fcd279c177..e01ef7f109f4 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -488,7 +488,8 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
}
static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@ -503,6 +504,7 @@ static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
entry->id = FLOW_ACTION_ADD;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit command offload");
return -EOPNOTSUPP;
}
entry->mangle.htype = tcf_pedit_htype(act, k);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f4d917705263..79c8901f66ab 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -419,7 +419,8 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
-static int tcf_police_act_to_flow_act(int tc_act, u32 *extval)
+static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
+ struct netlink_ext_ack *extack)
{
int act_id = -EOPNOTSUPP;
@@ -430,19 +431,28 @@ static int tcf_police_act_to_flow_act(int tc_act, u32 *extval)
act_id = FLOW_ACTION_DROP;
else if (tc_act == TC_ACT_PIPE)
act_id = FLOW_ACTION_PIPE;
+ else if (tc_act == TC_ACT_RECLASSIFY)
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"reclassify\"");
+ else
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) {
act_id = FLOW_ACTION_GOTO;
*extval = tc_act & TC_ACT_EXT_VAL_MASK;
} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) {
act_id = FLOW_ACTION_JUMP;
*extval = tc_act & TC_ACT_EXT_VAL_MASK;
+ } else if (tc_act == TC_ACT_UNSPEC) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"continue\"");
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
}
return act_id;
}
static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@ -466,14 +476,16 @@ static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
entry->police.mtu = tcf_police_tcfp_mtu(act);
act_id = tcf_police_act_to_flow_act(police->tcf_action,
- &entry->police.exceed.extval);
+ &entry->police.exceed.extval,
+ extack);
if (act_id < 0)
return act_id;
entry->police.exceed.act_id = act_id;
act_id = tcf_police_act_to_flow_act(p->tcfp_result,
- &entry->police.notexceed.extval);
+ &entry->police.notexceed.extval,
+ extack);
if (act_id < 0)
return act_id;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 9a22cdda6bbd..2f7f5e44d28c 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -291,7 +291,8 @@ static void tcf_offload_sample_get_group(struct flow_action_entry *entry,
}
static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index ceba11b198bb..e3bd11dfe1ca 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -23,6 +23,20 @@
static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;
+static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
+ struct sk_buff *skb)
+{
+ u16 queue_mapping = params->queue_mapping;
+
+ if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
+ u32 hash = skb_get_hash(skb);
+
+ queue_mapping += hash % params->mapping_mod;
+ }
+
+ return netdev_cap_txqueue(skb->dev, queue_mapping);
+}
+
static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -58,8 +72,12 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
}
}
if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
- skb->dev->real_num_tx_queues > params->queue_mapping)
- skb_set_queue_mapping(skb, params->queue_mapping);
+ skb->dev->real_num_tx_queues > params->queue_mapping) {
+#ifdef CONFIG_NET_EGRESS
+ netdev_xmit_skip_txqueue(true);
+#endif
+ skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
+ }
if (params->flags & SKBEDIT_F_MARK) {
skb->mark &= ~params->mask;
skb->mark |= params->mark & params->mask;
@@ -92,6 +110,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
[TCA_SKBEDIT_PTYPE] = { .len = sizeof(u16) },
[TCA_SKBEDIT_MASK] = { .len = sizeof(u32) },
[TCA_SKBEDIT_FLAGS] = { .len = sizeof(u64) },
+ [TCA_SKBEDIT_QUEUE_MAPPING_MAX] = { .len = sizeof(u16) },
};
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
@@ -108,6 +127,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct tcf_skbedit *d;
u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
u16 *queue_mapping = NULL, *ptype = NULL;
+ u16 mapping_mod = 1;
bool exists = false;
int ret = 0, err;
u32 index;
@@ -153,6 +173,25 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);
+ if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
+ u16 *queue_mapping_max;
+
+ if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
+ !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping.");
+ return -EINVAL;
+ }
+
+ queue_mapping_max =
+ nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
+ if (*queue_mapping_max < *queue_mapping) {
+ NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min.");
+ return -EINVAL;
+ }
+
+ mapping_mod = *queue_mapping_max - *queue_mapping + 1;
+ flags |= SKBEDIT_F_TXQ_SKBHASH;
+ }
if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
flags |= SKBEDIT_F_INHERITDSFIELD;
}
@@ -204,8 +243,10 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
params_new->flags = flags;
if (flags & SKBEDIT_F_PRIORITY)
params_new->priority = *priority;
- if (flags & SKBEDIT_F_QUEUE_MAPPING)
+ if (flags & SKBEDIT_F_QUEUE_MAPPING) {
params_new->queue_mapping = *queue_mapping;
+ params_new->mapping_mod = mapping_mod;
+ }
if (flags & SKBEDIT_F_MARK)
params_new->mark = *mark;
if (flags & SKBEDIT_F_PTYPE)
@@ -272,6 +313,13 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
goto nla_put_failure;
if (params->flags & SKBEDIT_F_INHERITDSFIELD)
pure_flags |= SKBEDIT_F_INHERITDSFIELD;
+ if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
+ if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX,
+ params->queue_mapping + params->mapping_mod - 1))
+ goto nla_put_failure;
+
+ pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
+ }
if (pure_flags != 0 &&
nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
goto nla_put_failure;
@@ -321,6 +369,7 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_skbedit))
+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
+ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */
+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
@@ -328,7 +377,8 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
}
static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@ -342,7 +392,14 @@ static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data
} else if (is_tcf_skbedit_priority(act)) {
entry->id = FLOW_ACTION_PRIORITY;
entry->priority = tcf_skbedit_priority(act);
+ } else if (is_tcf_skbedit_queue_mapping(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used");
+ return -EOPNOTSUPP;
+ } else if (is_tcf_skbedit_inheritdsfield(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
+ return -EOPNOTSUPP;
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported skbedit option offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
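
With SKBEDIT_F_TXQ_SKBHASH, tcf_skbedit_hash() spreads packets over the [queue_mapping, queue_mapping_max] range by flow hash: queue = min + (hash % mapping_mod), with mapping_mod = max - min + 1, then capped to the device's real Tx queue count. A small arithmetic sketch, with netdev_cap_txqueue() replaced by an explicit check (same "fall back to queue 0" behaviour):

/* Sketch of the queue_mapping spreading added to act_skbedit. */
#include <stdint.h>
#include <stdio.h>

static uint16_t skbedit_hash(uint16_t queue_min, uint16_t mapping_mod,
                             uint32_t flow_hash, uint16_t real_num_tx_queues)
{
        uint16_t queue = queue_min + flow_hash % mapping_mod;

        /* stand-in for netdev_cap_txqueue(): never exceed the device */
        if (queue >= real_num_tx_queues)
                queue = 0;
        return queue;
}

int main(void)
{
        /* e.g. a mapping range of 2..5 on an 8-queue device */
        uint16_t min = 2, mod = 5 - 2 + 1;
        uint32_t hashes[] = { 7, 12, 33, 100 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("hash %u -> txq %u\n", hashes[i],
                       skbedit_hash(min, mod, hashes[i], 8));
        return 0;
}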
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 23aba03d26a8..856dc23cef8c 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -808,7 +808,8 @@ static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
void *entry_data,
u32 *index_inc,
- bool bind)
+ bool bind,
+ struct netlink_ext_ack *extack)
{
int err;
@@ -823,6 +824,7 @@ static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
} else if (is_tcf_tunnel_release(act)) {
entry->id = FLOW_ACTION_TUNNEL_DECAP;
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel key mode offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 883454c4f921..68b5e772386a 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -369,7 +369,8 @@ static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
}
static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
- u32 *index_inc, bool bind)
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack)
{
if (bind) {
struct flow_action_entry *entry = entry_data;
@@ -398,6 +399,7 @@ static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
tcf_vlan_push_eth(entry->vlan_push_eth.src, entry->vlan_push_eth.dst, act);
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported vlan action mode offload");
return -EOPNOTSUPP;
}
*index_inc = 1;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f0699f39afdb..9bb4d3dcc994 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3513,20 +3513,25 @@ EXPORT_SYMBOL(tc_cleanup_offload_action);
static int tc_setup_offload_act(struct tc_action *act,
struct flow_action_entry *entry,
- u32 *index_inc)
+ u32 *index_inc,
+ struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
- if (act->ops->offload_act_setup)
- return act->ops->offload_act_setup(act, entry, index_inc, true);
- else
+ if (act->ops->offload_act_setup) {
+ return act->ops->offload_act_setup(act, entry, index_inc, true,
+ extack);
+ } else {
+ NL_SET_ERR_MSG(extack, "Action does not support offload");
return -EOPNOTSUPP;
+ }
#else
return 0;
#endif
}
int tc_setup_action(struct flow_action *flow_action,
- struct tc_action *actions[])
+ struct tc_action *actions[],
+ struct netlink_ext_ack *extack)
{
int i, j, index, err = 0;
struct tc_action *act;
@@ -3551,7 +3556,7 @@ int tc_setup_action(struct flow_action *flow_action,
entry->hw_stats = tc_act_hw_stats(act->hw_stats);
entry->hw_index = act->tcfa_index;
index = 0;
- err = tc_setup_offload_act(act, entry, &index);
+ err = tc_setup_offload_act(act, entry, &index, extack);
if (!err)
j += index;
else
@@ -3570,13 +3575,14 @@ err_out_locked:
}
int tc_setup_offload_action(struct flow_action *flow_action,
- const struct tcf_exts *exts)
+ const struct tcf_exts *exts,
+ struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
if (!exts)
return 0;
- return tc_setup_action(flow_action, exts->actions);
+ return tc_setup_action(flow_action, exts->actions, extack);
#else
return 0;
#endif
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index ed5e6f08e74a..dcca70144dff 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -72,6 +72,7 @@ struct fl_flow_key {
} tp_range;
struct flow_dissector_key_ct ct;
struct flow_dissector_key_hash hash;
+ struct flow_dissector_key_num_of_vlans num_of_vlans;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -464,14 +465,12 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
cls_flower.rule->match.key = &f->mkey;
cls_flower.classid = f->res.classid;
- err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
+ err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
+ cls_flower.common.extack);
if (err) {
kfree(cls_flower.rule);
- if (skip_sw) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
- return err;
- }
- return 0;
+
+ return skip_sw ? err : 0;
}
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
@@ -714,6 +713,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
[TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
[TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
};
@@ -1030,8 +1030,10 @@ static void fl_set_key_vlan(struct nlattr **tb,
VLAN_PRIORITY_MASK;
key_mask->vlan_priority = VLAN_PRIORITY_MASK;
}
- key_val->vlan_tpid = ethertype;
- key_mask->vlan_tpid = cpu_to_be16(~0);
+ if (ethertype) {
+ key_val->vlan_tpid = ethertype;
+ key_mask->vlan_tpid = cpu_to_be16(~0);
+ }
if (tb[vlan_next_eth_type_key]) {
key_val->vlan_eth_type =
nla_get_be16(tb[vlan_next_eth_type_key]);
@@ -1581,6 +1583,26 @@ static int fl_set_key_ct(struct nlattr **tb,
return 0;
}
+static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
+ struct fl_flow_key *key, struct fl_flow_key *mask,
+ int vthresh)
+{
+ const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
+
+ if (!tb) {
+ *ethertype = 0;
+ return good_num_of_vlans;
+ }
+
+ *ethertype = nla_get_be16(tb);
+ if (good_num_of_vlans || eth_type_vlan(*ethertype))
+ return true;
+
+ key->basic.n_proto = *ethertype;
+ mask->basic.n_proto = cpu_to_be16(~0);
+ return false;
+}
+
static int fl_set_key(struct net *net, struct nlattr **tb,
struct fl_flow_key *key, struct fl_flow_key *mask,
struct netlink_ext_ack *extack)
@@ -1602,37 +1624,30 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
sizeof(key->eth.src));
-
- if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
- ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
-
- if (eth_type_vlan(ethertype)) {
- fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
- TCA_FLOWER_KEY_VLAN_PRIO,
- TCA_FLOWER_KEY_VLAN_ETH_TYPE,
- &key->vlan, &mask->vlan);
-
- if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
- ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
- if (eth_type_vlan(ethertype)) {
- fl_set_key_vlan(tb, ethertype,
- TCA_FLOWER_KEY_CVLAN_ID,
- TCA_FLOWER_KEY_CVLAN_PRIO,
- TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
- &key->cvlan, &mask->cvlan);
- fl_set_key_val(tb, &key->basic.n_proto,
- TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
- &mask->basic.n_proto,
- TCA_FLOWER_UNSPEC,
- sizeof(key->basic.n_proto));
- } else {
- key->basic.n_proto = ethertype;
- mask->basic.n_proto = cpu_to_be16(~0);
- }
- }
- } else {
- key->basic.n_proto = ethertype;
- mask->basic.n_proto = cpu_to_be16(~0);
+ fl_set_key_val(tb, &key->num_of_vlans,
+ TCA_FLOWER_KEY_NUM_OF_VLANS,
+ &mask->num_of_vlans,
+ TCA_FLOWER_UNSPEC,
+ sizeof(key->num_of_vlans));
+
+ if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
+ fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
+ TCA_FLOWER_KEY_VLAN_PRIO,
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ &key->vlan, &mask->vlan);
+
+ if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
+ &ethertype, key, mask, 1)) {
+ fl_set_key_vlan(tb, ethertype,
+ TCA_FLOWER_KEY_CVLAN_ID,
+ TCA_FLOWER_KEY_CVLAN_PRIO,
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+ &key->cvlan, &mask->cvlan);
+ fl_set_key_val(tb, &key->basic.n_proto,
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+ &mask->basic.n_proto,
+ TCA_FLOWER_UNSPEC,
+ sizeof(key->basic.n_proto));
}
}
@@ -1906,6 +1921,8 @@ static void fl_init_dissector(struct flow_dissector *dissector,
FLOW_DISSECTOR_KEY_CT, ct);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_HASH, hash);
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
skb_flow_dissector_init(dissector, keys, cnt);
}
@@ -2362,11 +2379,11 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
cls_flower.rule->match.mask = &f->mask->key;
cls_flower.rule->match.key = &f->mkey;
- err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
+ err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
+ cls_flower.common.extack);
if (err) {
kfree(cls_flower.rule);
if (tc_skip_sw(f->flags)) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
__fl_put(f);
return err;
}
@@ -2994,6 +3011,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
sizeof(key->basic.n_proto)))
goto nla_put_failure;
+ if (mask->num_of_vlans.num_of_vlans) {
+ if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
+ goto nla_put_failure;
+ }
+
if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
goto nla_put_failure;
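
In the cls_flower rework, is_vlan_key() decides whether an ethertype attribute opens (another) VLAN level: either the num_of_vlans key already exceeds the threshold (0 for the outer tag, 1 for the inner cvlan tag), or the ethertype itself is a VLAN TPID; otherwise the ethertype becomes the basic n_proto match. A compact decision sketch with the constants spelled out (names below are local stand-ins for the netlink attribute handling):

/* Sketch of the is_vlan_key() decision added to fl_set_key(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88a8

struct key { uint8_t num_of_vlans; uint16_t n_proto; };

static bool eth_type_vlan(uint16_t ethertype)
{
        return ethertype == ETH_P_8021Q || ethertype == ETH_P_8021AD;
}

/* attr_present/ethertype model the TCA_FLOWER_KEY_*ETH_TYPE attribute. */
static bool is_vlan_key(bool attr_present, uint16_t ethertype,
                        struct key *key, int vthresh)
{
        bool good_num_of_vlans = key->num_of_vlans > vthresh;

        if (!attr_present)
                return good_num_of_vlans;

        if (good_num_of_vlans || eth_type_vlan(ethertype))
                return true;

        key->n_proto = ethertype;  /* plain ethertype: match it as n_proto */
        return false;
}

int main(void)
{
        struct key two_tags = { .num_of_vlans = 2 };
        struct key plain = { .num_of_vlans = 0 };

        /* num_of_vlans=2 alone is enough to match outer and inner tags */
        printf("outer=%d inner=%d\n",
               is_vlan_key(false, 0, &two_tags, 0),
               is_vlan_key(false, 0, &two_tags, 1));

        /* a non-VLAN ethertype with no tags becomes the n_proto match */
        printf("vlan=%d n_proto=0x%04x\n",
               is_vlan_key(true, 0x0800, &plain, 0), plain.n_proto);
        return 0;
}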
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index ca5670fd5228..06cf22adbab7 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -97,16 +97,13 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
cls_mall.command = TC_CLSMATCHALL_REPLACE;
cls_mall.cookie = cookie;
- err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
+ err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
+ cls_mall.common.extack);
if (err) {
kfree(cls_mall.rule);
mall_destroy_hw_filter(tp, head, cookie, NULL);
- if (skip_sw)
- NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
- else
- err = 0;
- return err;
+ return skip_sw ? err : 0;
}
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
@@ -302,14 +299,12 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = (unsigned long)head;
- err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
+ err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
+ cls_mall.common.extack);
if (err) {
kfree(cls_mall.rule);
- if (add && tc_skip_sw(head->flags)) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
- return err;
- }
- return 0;
+
+ return add && tc_skip_sw(head->flags) ? err : 0;
}
err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5bab9f8b8f45..dba0b3e24af5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1019,22 +1019,14 @@ EXPORT_SYMBOL(qdisc_create_dflt);
void qdisc_reset(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
- struct sk_buff *skb, *tmp;
trace_qdisc_reset(qdisc);
if (ops->reset)
ops->reset(qdisc);
- skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
- __skb_unlink(skb, &qdisc->gso_skb);
- kfree_skb_list(skb);
- }
-
- skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
- __skb_unlink(skb, &qdisc->skb_bad_txq);
- kfree_skb_list(skb);
- }
+ __skb_queue_purge(&qdisc->gso_skb);
+ __skb_queue_purge(&qdisc->skb_bad_txq);
qdisc->q.qlen = 0;
qdisc->qstats.backlog = 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 470dbdc27d58..d081858c2d07 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -926,7 +926,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
return 1;
/* v4-mapped-v6 addresses */
case AF_INET:
- if (!__ipv6_only_sock(sctp_opt2sk(sp)))
+ if (!ipv6_only_sock(sctp_opt2sk(sp)))
return 1;
fallthrough;
default:
@@ -952,7 +952,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
return 0;
/* If the socket is IPv6 only, v4 addrs will not match */
- if (__ipv6_only_sock(sk) && af1 != af2)
+ if (ipv6_only_sock(sk) && af1 != af2)
return 0;
/* Today, wildcard AF_INET/AF_INET6. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7b0427658056..6d37d2dfb3da 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2084,7 +2084,7 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
* 5 for complete description of the flags.
*/
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct sctp_ulpevent *event = NULL;
struct sctp_sock *sp = sctp_sk(sk);
@@ -2093,9 +2093,8 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int err = 0;
int skb_len;
- pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
- "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
- addr_len);
+ pr_debug("%s: sk:%p, msghdr:%p, len:%zd, flags:0x%x, addr_len:%p)\n",
+ __func__, sk, msg, len, flags, addr_len);
lock_sock(sk);
@@ -2105,7 +2104,7 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
goto out;
}
- skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
+ skb = sctp_skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
@@ -2129,7 +2128,7 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
head_skb = event->chunk->head_skb;
else
head_skb = skb;
- sock_recv_ts_and_drops(msg, sk, head_skb);
+ sock_recv_cmsgs(msg, sk, head_skb);
if (sctp_ulpevent_is_notification(event)) {
msg->msg_flags |= MSG_NOTIFICATION;
sp->pf->event_msgname(event, msg->msg_name, addr_len);
@@ -8978,14 +8977,13 @@ out:
* Note: This is pretty much the same routine as in core/datagram.c
* with a few changes to make lksctp work.
*/
-struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
- int noblock, int *err)
+struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int *err)
{
int error;
struct sk_buff *skb;
long timeo;
- timeo = sock_rcvtimeo(sk, noblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
MAX_SCHEDULE_TIMEOUT);
@@ -9018,7 +9016,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
break;
if (sk_can_busy_loop(sk)) {
- sk_busy_loop(sk, noblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
continue;
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 0c3d2b4d7321..8920ca92a011 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -1063,7 +1063,7 @@ void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
struct sk_buff *skb;
int err;
- skb = sctp_skb_recv_datagram(sk, MSG_PEEK, 1, &err);
+ skb = sctp_skb_recv_datagram(sk, MSG_PEEK | MSG_DONTWAIT, &err);
if (skb != NULL) {
__sctp_ulpevent_read_nxtinfo(sctp_skb2event(skb),
msghdr, skb);
diff --git a/net/socket.c b/net/socket.c
index 6887840682bb..f0c39c874665 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -930,13 +930,22 @@ static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount);
}
-void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb)
+static void sock_recv_mark(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ if (sock_flag(sk, SOCK_RCVMARK) && skb)
+ put_cmsg(msg, SOL_SOCKET, SO_MARK, sizeof(__u32),
+ &skb->mark);
+}
+
+void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
{
sock_recv_timestamp(msg, sk, skb);
sock_recv_drops(msg, sk, skb);
+ sock_recv_mark(msg, sk, skb);
}
-EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
+EXPORT_SYMBOL_GPL(__sock_recv_cmsgs);
INDIRECT_CALLABLE_DECLARE(int inet_recvmsg(struct socket *, struct msghdr *,
size_t, int));
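
The socket.c hunk adds sock_recv_mark(): when SOCK_RCVMARK is set on a socket, the skb mark is delivered as a SOL_SOCKET/SO_MARK control message alongside timestamps and drop counts. A userspace receive loop reading that cmsg might look like the sketch below; it assumes a kernel and headers with the SO_RCVMARK socket option from this series, and the fallback constant values are assumptions taken from asm-generic.

/* Userspace sketch: enable SO_RCVMARK on a UDP socket and read the
 * SO_MARK ancillary data produced by sock_recv_mark(). */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SO_RCVMARK
#define SO_RCVMARK 75          /* assumption: asm-generic value */
#endif
#ifndef SO_MARK
#define SO_MARK 36             /* assumption: asm-generic value */
#endif

int main(void)
{
        int one = 1, fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in sa = { .sin_family = AF_INET,
                                  .sin_port = htons(9000),
                                  .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        char data[2048], cbuf[CMSG_SPACE(sizeof(unsigned int))];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = cbuf,
                              .msg_controllen = sizeof(cbuf) };
        struct cmsghdr *cm;

        if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            setsockopt(fd, SOL_SOCKET, SO_RCVMARK, &one, sizeof(one)) < 0) {
                perror("setup");
                return 1;
        }

        if (recvmsg(fd, &msg, 0) < 0) {
                perror("recvmsg");
                return 1;
        }

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SO_MARK) {
                        unsigned int mark;

                        memcpy(&mark, CMSG_DATA(cm), sizeof(mark));
                        printf("skb mark: %u\n", mark);
                }
        }
        return 0;
}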
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cc35ec433400..45336e68bf79 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -464,7 +464,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
0, 0, MSG_PEEK | MSG_DONTWAIT);
if (err < 0)
goto out_recv_err;
- skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err);
+ skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err);
if (!skb)
goto out_recv_err;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 8ab64ea46870..5c91c5457197 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1355,7 +1355,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
if (sk == NULL)
goto out;
for (;;) {
- skb = skb_recv_udp(sk, 0, 1, &err);
+ skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
if (skb == NULL)
break;
xs_udp_data_read_skb(&transport->xprt, sk, skb);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index af875ad4a822..b12f81a2b44c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -964,11 +964,9 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
tls_ctx->rx.rec_seq, rxm->full_len,
is_encrypted, is_decrypted);
- ctx->sw.decrypted |= is_decrypted;
-
if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
if (likely(is_encrypted || is_decrypted))
- return 0;
+ return is_decrypted;
/* After tls_device_down disables the offload, the next SKB will
* likely have initial fragments decrypted, and final ones not
@@ -983,7 +981,7 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
*/
if (is_decrypted) {
ctx->resync_nh_reset = 1;
- return 0;
+ return is_decrypted;
}
if (is_encrypted) {
tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index a8976ef95528..939d1673f508 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -44,6 +44,11 @@
#include <net/strparser.h>
#include <net/tls.h>
+struct tls_decrypt_arg {
+ bool zc;
+ bool async;
+};
+
noinline void tls_err_abort(struct sock *sk, int err)
{
WARN_ON_ONCE(err >= 0);
@@ -128,32 +133,31 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
return __skb_nsg(skb, offset, len, 0);
}
-static int padding_length(struct tls_sw_context_rx *ctx,
- struct tls_prot_info *prot, struct sk_buff *skb)
+static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
{
struct strp_msg *rxm = strp_msg(skb);
+ struct tls_msg *tlm = tls_msg(skb);
int sub = 0;
/* Determine zero-padding length */
if (prot->version == TLS_1_3_VERSION) {
+ int offset = rxm->full_len - TLS_TAG_SIZE - 1;
char content_type = 0;
int err;
- int back = 17;
while (content_type == 0) {
- if (back > rxm->full_len - prot->prepend_size)
+ if (offset < prot->prepend_size)
return -EBADMSG;
- err = skb_copy_bits(skb,
- rxm->offset + rxm->full_len - back,
+ err = skb_copy_bits(skb, rxm->offset + offset,
&content_type, 1);
if (err)
return err;
if (content_type)
break;
sub++;
- back++;
+ offset--;
}
- ctx->control = content_type;
+ tlm->control = content_type;
}
return sub;
}
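
The reworked padding_length() scans backwards from full_len - TLS_TAG_SIZE - 1 for the first non-zero byte: that byte is the TLS 1.3 inner content type, and everything after it (before the AEAD tag) is zero padding. A standalone sketch of that scan over a flat buffer (no skb, simplified layout), mirroring the same EBADMSG condition when the record is all padding:

/* Sketch of the TLS 1.3 inner-content-type / padding scan in padding_length(). */
#include <stdio.h>

#define TLS_TAG_SIZE 16        /* AEAD tag length, e.g. AES-GCM */

/* plaintext = application data || content-type byte || zero padding,
 * followed by the TLS_TAG_SIZE authentication tag. Returns the number
 * of padding bytes and reports the content type, or -1 on a bad record. */
static int tls13_padding_length(const unsigned char *rec, int full_len,
                                int prepend_size, unsigned char *content_type)
{
        int offset = full_len - TLS_TAG_SIZE - 1;
        int sub = 0;

        while (rec[offset] == 0) {
                if (--offset < prepend_size)
                        return -1;          /* record is all padding: EBADMSG */
                sub++;
        }
        *content_type = rec[offset];
        return sub;
}

int main(void)
{
        /* 5-byte header, "hi", content type 23 (application_data),
         * 3 bytes of padding, then a 16-byte tag (zeroed here)     */
        unsigned char rec[5 + 2 + 1 + 3 + TLS_TAG_SIZE] = {
                [5] = 'h', [6] = 'i', [7] = 23,
        };
        unsigned char type;
        int pad = tls13_padding_length(rec, sizeof(rec), 5, &type);

        printf("padding=%d content_type=%u\n", pad, type); /* 3 and 23 */
        return 0;
}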
@@ -169,7 +173,6 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
struct scatterlist *sg;
struct sk_buff *skb;
unsigned int pages;
- int pending;
skb = (struct sk_buff *)req->data;
tls_ctx = tls_get_ctx(skb->sk);
@@ -185,17 +188,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
tls_err_abort(skb->sk, err);
} else {
struct strp_msg *rxm = strp_msg(skb);
- int pad;
- pad = padding_length(ctx, prot, skb);
- if (pad < 0) {
- ctx->async_wait.err = pad;
- tls_err_abort(skb->sk, pad);
- } else {
- rxm->full_len -= pad;
- rxm->offset += prot->prepend_size;
- rxm->full_len -= prot->overhead_size;
- }
+ /* No TLS 1.3 support with async crypto */
+ WARN_ON(prot->tail_size);
+
+ rxm->offset += prot->prepend_size;
+ rxm->full_len -= prot->overhead_size;
}
/* After using skb->sk to propagate sk through crypto async callback
@@ -217,9 +215,7 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
kfree(aead_req);
spin_lock_bh(&ctx->decrypt_compl_lock);
- pending = atomic_dec_return(&ctx->decrypt_pending);
-
- if (!pending && ctx->async_notify)
+ if (!atomic_dec_return(&ctx->decrypt_pending))
complete(&ctx->async_wait.completion);
spin_unlock_bh(&ctx->decrypt_compl_lock);
}
@@ -231,7 +227,7 @@ static int tls_do_decryption(struct sock *sk,
char *iv_recv,
size_t data_len,
struct aead_request *aead_req,
- bool async)
+ struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
@@ -244,7 +240,7 @@ static int tls_do_decryption(struct sock *sk,
data_len + prot->tag_size,
(u8 *)iv_recv);
- if (async) {
+ if (darg->async) {
/* Using skb->sk to push sk through to crypto async callback
* handler. This allows propagating errors up to the socket
* if needed. It _must_ be cleared in the async handler
@@ -264,14 +260,15 @@ static int tls_do_decryption(struct sock *sk,
ret = crypto_aead_decrypt(aead_req);
if (ret == -EINPROGRESS) {
- if (async)
- return ret;
+ if (darg->async)
+ return 0;
ret = crypto_wait_req(ret, &ctx->async_wait);
}
+ darg->async = false;
- if (async)
- atomic_dec(&ctx->decrypt_pending);
+ if (ret == -EBADMSG)
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
return ret;
}
@@ -1346,15 +1343,14 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
return skb;
}
-static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
+static int tls_setup_from_iter(struct iov_iter *from,
int length, int *pages_used,
- unsigned int *size_used,
struct scatterlist *to,
int to_max_pages)
{
int rc = 0, i = 0, num_elem = *pages_used, maxpages;
struct page *pages[MAX_SKB_FRAGS];
- unsigned int size = *size_used;
+ unsigned int size = 0;
ssize_t copied, use;
size_t offset;
@@ -1397,8 +1393,7 @@ static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
sg_mark_end(&to[num_elem - 1]);
out:
if (rc)
- iov_iter_revert(from, size - *size_used);
- *size_used = size;
+ iov_iter_revert(from, size);
*pages_used = num_elem;
return rc;
@@ -1415,12 +1410,13 @@ out:
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
struct iov_iter *out_iov,
struct scatterlist *out_sg,
- int *chunk, bool *zc, bool async)
+ struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm = strp_msg(skb);
+ struct tls_msg *tlm = tls_msg(skb);
int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
struct aead_request *aead_req;
struct sk_buff *unused;
@@ -1431,7 +1427,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
prot->tail_size;
int iv_offset = 0;
- if (*zc && (out_iov || out_sg)) {
+ if (darg->zc && (out_iov || out_sg)) {
if (out_iov)
n_sgout = 1 +
iov_iter_npages_cap(out_iov, INT_MAX, data_len);
@@ -1441,7 +1437,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
rxm->full_len - prot->prepend_size);
} else {
n_sgout = 0;
- *zc = false;
+ darg->zc = false;
n_sgin = skb_cow_data(skb, 0, &unused);
}
@@ -1456,7 +1452,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
mem_size = aead_size + (nsg * sizeof(struct scatterlist));
mem_size = mem_size + prot->aad_size;
- mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
+ mem_size = mem_size + MAX_IV_SIZE;
/* Allocate a single block of memory which contains
* aead_req || sgin[] || sgout[] || aad || iv.
@@ -1486,26 +1482,26 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
}
/* Prepare IV */
- err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
- iv + iv_offset + prot->salt_size,
- prot->iv_size);
- if (err < 0) {
- kfree(mem);
- return err;
- }
if (prot->version == TLS_1_3_VERSION ||
- prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
+ prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
memcpy(iv + iv_offset, tls_ctx->rx.iv,
prot->iv_size + prot->salt_size);
- else
+ } else {
+ err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
+ iv + iv_offset + prot->salt_size,
+ prot->iv_size);
+ if (err < 0) {
+ kfree(mem);
+ return err;
+ }
memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
-
+ }
xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
/* Prepare AAD */
tls_make_aad(aad, rxm->full_len - prot->overhead_size +
prot->tail_size,
- tls_ctx->rx.rec_seq, ctx->control, prot);
+ tls_ctx->rx.rec_seq, tlm->control, prot);
/* Prepare sgin */
sg_init_table(sgin, n_sgin);
@@ -1523,9 +1519,8 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
sg_init_table(sgout, n_sgout);
sg_set_buf(&sgout[0], aad, prot->aad_size);
- *chunk = 0;
- err = tls_setup_from_iter(sk, out_iov, data_len,
- &pages, chunk, &sgout[1],
+ err = tls_setup_from_iter(out_iov, data_len,
+ &pages, &sgout[1],
(n_sgout - 1));
if (err < 0)
goto fallback_to_reg_recv;
@@ -1538,15 +1533,14 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
fallback_to_reg_recv:
sgout = sgin;
pages = 0;
- *chunk = data_len;
- *zc = false;
+ darg->zc = false;
}
/* Prepare and submit AEAD request */
err = tls_do_decryption(sk, skb, sgin, sgout, iv,
- data_len, aead_req, async);
- if (err == -EINPROGRESS)
- return err;
+ data_len, aead_req, darg);
+ if (darg->async)
+ return 0;
/* Release the pages in case iov was mapped to pages */
for (; pages > 0; pages--)
@@ -1557,87 +1551,83 @@ fallback_to_reg_recv:
}
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *dest, int *chunk, bool *zc,
- bool async)
+ struct iov_iter *dest,
+ struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm = strp_msg(skb);
- int pad, err = 0;
+ struct tls_msg *tlm = tls_msg(skb);
+ int pad, err;
- if (!ctx->decrypted) {
- if (tls_ctx->rx_conf == TLS_HW) {
- err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
- if (err < 0)
- return err;
- }
+ if (tlm->decrypted) {
+ darg->zc = false;
+ darg->async = false;
+ return 0;
+ }
- /* Still not decrypted after tls_device */
- if (!ctx->decrypted) {
- err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
- async);
- if (err < 0) {
- if (err == -EINPROGRESS)
- tls_advance_record_sn(sk, prot,
- &tls_ctx->rx);
- else if (err == -EBADMSG)
- TLS_INC_STATS(sock_net(sk),
- LINUX_MIB_TLSDECRYPTERROR);
- return err;
- }
- } else {
- *zc = false;
+ if (tls_ctx->rx_conf == TLS_HW) {
+ err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
+ if (err < 0)
+ return err;
+ if (err > 0) {
+ tlm->decrypted = 1;
+ darg->zc = false;
+ darg->async = false;
+ goto decrypt_done;
}
+ }
- pad = padding_length(ctx, prot, skb);
- if (pad < 0)
- return pad;
+ err = decrypt_internal(sk, skb, dest, NULL, darg);
+ if (err < 0)
+ return err;
+ if (darg->async)
+ goto decrypt_next;
- rxm->full_len -= pad;
- rxm->offset += prot->prepend_size;
- rxm->full_len -= prot->overhead_size;
- tls_advance_record_sn(sk, prot, &tls_ctx->rx);
- ctx->decrypted = 1;
- ctx->saved_data_ready(sk);
- } else {
- *zc = false;
- }
+decrypt_done:
+ pad = padding_length(prot, skb);
+ if (pad < 0)
+ return pad;
- return err;
+ rxm->full_len -= pad;
+ rxm->offset += prot->prepend_size;
+ rxm->full_len -= prot->overhead_size;
+ tlm->decrypted = 1;
+decrypt_next:
+ tls_advance_record_sn(sk, prot, &tls_ctx->rx);
+
+ return 0;
}
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout)
{
- bool zc = true;
- int chunk;
+ struct tls_decrypt_arg darg = { .zc = true, };
- return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
+ return decrypt_internal(sk, skb, NULL, sgout, &darg);
}
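
Editor's aside: the hunks above and below replace the loose (chunk, zc, async) in/out parameters with a single struct tls_decrypt_arg passed by pointer; decrypt_skb() above now just seeds it with ".zc = true". A minimal stand-alone sketch of that argument-struct pattern follows — the struct and function names here are illustrative stand-ins, not the kernel's; the real struct tls_decrypt_arg is defined elsewhere in this patch and carries at least .zc and .async, which is all the callers shown here touch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct tls_decrypt_arg: bundles what used to be
 * separate in/out bool and int parameters.
 */
struct decrypt_arg {
        bool zc;        /* in: try zero-copy into the caller's iov */
        bool async;     /* in: async allowed; out: request went async */
};

/* Stand-in for decrypt_internal(): may downgrade zc, may report async. */
static int do_decrypt(struct decrypt_arg *darg)
{
        if (darg->zc)
                printf("zero-copy path\n");
        else
                printf("copy path\n");
        return 0;
}

int main(void)
{
        /* Same shape as "struct tls_decrypt_arg darg = { .zc = true, };" */
        struct decrypt_arg darg = { .zc = true, };

        return do_decrypt(&darg);
}
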
-static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
- unsigned int len)
+static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
+ u8 *control)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-
- if (skb) {
- struct strp_msg *rxm = strp_msg(skb);
-
- if (len < rxm->full_len) {
- rxm->offset += len;
- rxm->full_len -= len;
- return false;
+ int err;
+
+ if (!*control) {
+ *control = tlm->control;
+ if (!*control)
+ return -EBADMSG;
+
+ err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+ sizeof(*control), control);
+ if (*control != TLS_RECORD_TYPE_DATA) {
+ if (err || msg->msg_flags & MSG_CTRUNC)
+ return -EIO;
}
- consume_skb(skb);
+ } else if (*control != tlm->control) {
+ return 0;
}
- /* Finished with message */
- ctx->recv_pkt = NULL;
- __strp_unpause(&ctx->strp);
-
- return true;
+ return 1;
}
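
Editor's aside: tls_record_content_type() above is what surfaces the record type to userspace as a TLS_GET_RECORD_TYPE control message on SOL_TLS, and refuses to merge records of different types into one read. For orientation, the receiving side of that contract as user-space code might read it — an illustrative sketch only; tls_recv() is a made-up helper, the socket is assumed to already have the kTLS ULP and RX key installed, and SOL_TLS is defined locally in case the libc headers lack it.

#include <linux/tls.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* Receive one chunk and report the TLS content type it belongs to. */
ssize_t tls_recv(int fd, void *buf, size_t len, unsigned char *rec_type)
{
        char cbuf[CMSG_SPACE(sizeof(unsigned char))];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;
        ssize_t n;

        n = recvmsg(fd, &msg, 0);
        if (n <= 0)
                return n;

        *rec_type = 23;         /* default: application_data, if no cmsg came back */
        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
                if (cmsg->cmsg_level == SOL_TLS &&
                    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
                        memcpy(rec_type, CMSG_DATA(cmsg), sizeof(*rec_type));
        return n;
}
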
/* This function traverses the rx_list in tls receive context to copy the
@@ -1648,31 +1638,23 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
static int process_rx_list(struct tls_sw_context_rx *ctx,
struct msghdr *msg,
u8 *control,
- bool *cmsg,
size_t skip,
size_t len,
bool zc,
bool is_peek)
{
struct sk_buff *skb = skb_peek(&ctx->rx_list);
- u8 ctrl = *control;
- u8 msgc = *cmsg;
struct tls_msg *tlm;
ssize_t copied = 0;
-
- /* Set the record type in 'control' if caller didn't pass it */
- if (!ctrl && skb) {
- tlm = tls_msg(skb);
- ctrl = tlm->control;
- }
+ int err;
while (skip && skb) {
struct strp_msg *rxm = strp_msg(skb);
tlm = tls_msg(skb);
- /* Cannot process a record of different type */
- if (ctrl != tlm->control)
- return 0;
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+ goto out;
if (skip < rxm->full_len)
break;
@@ -1688,30 +1670,15 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
tlm = tls_msg(skb);
- /* Cannot process a record of different type */
- if (ctrl != tlm->control)
- return 0;
-
- /* Set record type if not already done. For a non-data record,
- * do not proceed if record type could not be copied.
- */
- if (!msgc) {
- int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
- sizeof(ctrl), &ctrl);
- msgc = true;
- if (ctrl != TLS_RECORD_TYPE_DATA) {
- if (cerr || msg->msg_flags & MSG_CTRUNC)
- return -EIO;
-
- *cmsg = msgc;
- }
- }
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+ goto out;
if (!zc || (rxm->full_len - skip) > len) {
- int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ err = skb_copy_datagram_msg(skb, rxm->offset + skip,
msg, chunk);
if (err < 0)
- return err;
+ goto out;
}
len = len - chunk;
@@ -1738,21 +1705,21 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
next_skb = skb_peek_next(skb, &ctx->rx_list);
if (!is_peek) {
- skb_unlink(skb, &ctx->rx_list);
+ __skb_unlink(skb, &ctx->rx_list);
consume_skb(skb);
}
skb = next_skb;
}
+ err = 0;
- *control = ctrl;
- return copied;
+out:
+ return copied ? : err;
}
int tls_sw_recvmsg(struct sock *sk,
struct msghdr *msg,
size_t len,
- int nonblock,
int flags,
int *addr_len)
{
@@ -1766,16 +1733,13 @@ int tls_sw_recvmsg(struct sock *sk,
struct tls_msg *tlm;
struct sk_buff *skb;
ssize_t copied = 0;
- bool cmsg = false;
+ bool async = false;
int target, err = 0;
long timeo;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
bool bpf_strp_enabled;
- int num_async = 0;
- int pending;
-
- flags |= nonblock;
+ bool zc_capable;
if (unlikely(flags & MSG_ERRQUEUE))
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
@@ -1784,81 +1748,64 @@ int tls_sw_recvmsg(struct sock *sk,
lock_sock(sk);
bpf_strp_enabled = sk_psock_strp_enabled(psock);
+ /* If crypto failed the connection is broken */
+ err = ctx->async_wait.err;
+ if (err)
+ goto end;
+
/* Process pending decrypted records. It must be non-zero-copy */
- err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
- is_peek);
- if (err < 0) {
- tls_err_abort(sk, err);
+ err = process_rx_list(ctx, msg, &control, 0, len, false, is_peek);
+ if (err < 0)
goto end;
- } else {
- copied = err;
- }
+ copied = err;
if (len <= copied)
- goto recv_end;
+ goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
len = len - copied;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
+ prot->version != TLS_1_3_VERSION;
+ decrypted = 0;
while (len && (decrypted + copied < target || ctx->recv_pkt)) {
- bool retain_skb = false;
- bool zc = false;
- int to_decrypt;
- int chunk = 0;
- bool async_capable;
- bool async = false;
+ struct tls_decrypt_arg darg = {};
+ int to_decrypt, chunk;
skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
if (!skb) {
if (psock) {
- int ret = sk_msg_recvmsg(sk, psock, msg, len,
- flags);
-
- if (ret > 0) {
- decrypted += ret;
- len -= ret;
- continue;
- }
+ chunk = sk_msg_recvmsg(sk, psock, msg, len,
+ flags);
+ if (chunk > 0)
+ goto leave_on_list;
}
goto recv_end;
- } else {
- tlm = tls_msg(skb);
- if (prot->version == TLS_1_3_VERSION)
- tlm->control = 0;
- else
- tlm->control = ctx->control;
}
rxm = strp_msg(skb);
+ tlm = tls_msg(skb);
to_decrypt = rxm->full_len - prot->overhead_size;
- if (to_decrypt <= len && !is_kvec && !is_peek &&
- ctx->control == TLS_RECORD_TYPE_DATA &&
- prot->version != TLS_1_3_VERSION &&
- !bpf_strp_enabled)
- zc = true;
+ if (zc_capable && to_decrypt <= len &&
+ tlm->control == TLS_RECORD_TYPE_DATA)
+ darg.zc = true;
/* Do not use async mode if record is non-data */
- if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
- async_capable = ctx->async_capable;
+ if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
+ darg.async = ctx->async_capable;
else
- async_capable = false;
+ darg.async = false;
- err = decrypt_skb_update(sk, skb, &msg->msg_iter,
- &chunk, &zc, async_capable);
- if (err < 0 && err != -EINPROGRESS) {
+ err = decrypt_skb_update(sk, skb, &msg->msg_iter, &darg);
+ if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto recv_end;
}
- if (err == -EINPROGRESS) {
- async = true;
- num_async++;
- } else if (prot->version == TLS_1_3_VERSION) {
- tlm->control = ctx->control;
- }
+ async |= darg.async;
/* If the type of records being processed is not known yet,
* set it to record type just dequeued. If it is already known,
@@ -1867,131 +1814,105 @@ int tls_sw_recvmsg(struct sock *sk,
* is known just after record is dequeued from stream parser.
* For tls1.3, we disable async.
*/
-
- if (!control)
- control = tlm->control;
- else if (control != tlm->control)
+ err = tls_record_content_type(msg, tlm, &control);
+ if (err <= 0)
goto recv_end;
- if (!cmsg) {
- int cerr;
-
- cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
- sizeof(control), &control);
- cmsg = true;
- if (control != TLS_RECORD_TYPE_DATA) {
- if (cerr || msg->msg_flags & MSG_CTRUNC) {
- err = -EIO;
- goto recv_end;
- }
- }
+ ctx->recv_pkt = NULL;
+ __strp_unpause(&ctx->strp);
+ __skb_queue_tail(&ctx->rx_list, skb);
+
+ if (async) {
+ /* TLS 1.2-only, to_decrypt must be text length */
+ chunk = min_t(int, to_decrypt, len);
+leave_on_list:
+ decrypted += chunk;
+ len -= chunk;
+ continue;
}
+ /* TLS 1.3 may have updated the length by more than overhead */
+ chunk = rxm->full_len;
- if (async)
- goto pick_next_record;
+ if (!darg.zc) {
+ bool partially_consumed = chunk > len;
- if (!zc) {
if (bpf_strp_enabled) {
err = sk_psock_tls_strp_read(psock, skb);
if (err != __SK_PASS) {
rxm->offset = rxm->offset + rxm->full_len;
rxm->full_len = 0;
+ __skb_unlink(skb, &ctx->rx_list);
if (err == __SK_DROP)
consume_skb(skb);
- ctx->recv_pkt = NULL;
- __strp_unpause(&ctx->strp);
continue;
}
}
- if (rxm->full_len > len) {
- retain_skb = true;
+ if (partially_consumed)
chunk = len;
- } else {
- chunk = rxm->full_len;
- }
err = skb_copy_datagram_msg(skb, rxm->offset,
msg, chunk);
if (err < 0)
goto recv_end;
- if (!is_peek) {
- rxm->offset = rxm->offset + chunk;
- rxm->full_len = rxm->full_len - chunk;
+ if (is_peek)
+ goto leave_on_list;
+
+ if (partially_consumed) {
+ rxm->offset += chunk;
+ rxm->full_len -= chunk;
+ goto leave_on_list;
}
}
-pick_next_record:
- if (chunk > len)
- chunk = len;
-
decrypted += chunk;
len -= chunk;
- /* For async or peek case, queue the current skb */
- if (async || is_peek || retain_skb) {
- skb_queue_tail(&ctx->rx_list, skb);
- skb = NULL;
- }
+ __skb_unlink(skb, &ctx->rx_list);
+ consume_skb(skb);
- if (tls_sw_advance_skb(sk, skb, chunk)) {
- /* Return full control message to
- * userspace before trying to parse
- * another message type
- */
- msg->msg_flags |= MSG_EOR;
- if (control != TLS_RECORD_TYPE_DATA)
- goto recv_end;
- } else {
+ /* Return full control message to userspace before trying
+ * to parse another message type
+ */
+ msg->msg_flags |= MSG_EOR;
+ if (control != TLS_RECORD_TYPE_DATA)
break;
- }
}
recv_end:
- if (num_async) {
+ if (async) {
+ int ret, pending;
+
/* Wait for all previously submitted records to be decrypted */
spin_lock_bh(&ctx->decrypt_compl_lock);
- ctx->async_notify = true;
+ reinit_completion(&ctx->async_wait.completion);
pending = atomic_read(&ctx->decrypt_pending);
spin_unlock_bh(&ctx->decrypt_compl_lock);
if (pending) {
- err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
- if (err) {
- /* one of async decrypt failed */
- tls_err_abort(sk, err);
- copied = 0;
+ ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ if (ret) {
+ if (err >= 0 || err == -EINPROGRESS)
+ err = ret;
decrypted = 0;
goto end;
}
- } else {
- reinit_completion(&ctx->async_wait.completion);
}
- /* There can be no concurrent accesses, since we have no
- * pending decrypt operations
- */
- WRITE_ONCE(ctx->async_notify, false);
-
/* Drain records from the rx_list & copy if required */
if (is_peek || is_kvec)
- err = process_rx_list(ctx, msg, &control, &cmsg, copied,
+ err = process_rx_list(ctx, msg, &control, copied,
decrypted, false, is_peek);
else
- err = process_rx_list(ctx, msg, &control, &cmsg, 0,
+ err = process_rx_list(ctx, msg, &control, 0,
decrypted, true, is_peek);
- if (err < 0) {
- tls_err_abort(sk, err);
- copied = 0;
- goto end;
- }
+ decrypted = max(err, 0);
}
copied += decrypted;
end:
release_sock(sk);
- sk_defer_free_flush(sk);
if (psock)
sk_psock_put(sk, psock);
return copied ? : err;
@@ -2005,13 +1926,13 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct strp_msg *rxm = NULL;
struct sock *sk = sock->sk;
+ struct tls_msg *tlm;
struct sk_buff *skb;
ssize_t copied = 0;
bool from_queue;
int err = 0;
long timeo;
int chunk;
- bool zc = false;
lock_sock(sk);
@@ -2021,26 +1942,29 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
if (from_queue) {
skb = __skb_dequeue(&ctx->rx_list);
} else {
+ struct tls_decrypt_arg darg = {};
+
skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
&err);
if (!skb)
goto splice_read_end;
- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
+ err = decrypt_skb_update(sk, skb, NULL, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto splice_read_end;
}
}
+ rxm = strp_msg(skb);
+ tlm = tls_msg(skb);
+
/* splice does not support reading control messages */
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ if (tlm->control != TLS_RECORD_TYPE_DATA) {
err = -EINVAL;
goto splice_read_end;
}
- rxm = strp_msg(skb);
-
chunk = min_t(unsigned int, rxm->full_len, len);
copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
if (copied < 0)
@@ -2060,7 +1984,6 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
splice_read_end:
release_sock(sk);
- sk_defer_free_flush(sk);
return copied ? : err;
}
@@ -2084,10 +2007,10 @@ bool tls_sw_sock_is_readable(struct sock *sk)
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
- struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
struct strp_msg *rxm = strp_msg(skb);
+ struct tls_msg *tlm = tls_msg(skb);
size_t cipher_overhead;
size_t data_len = 0;
int ret;
@@ -2104,11 +2027,11 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
/* Linearize header to local buffer */
ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
-
if (ret < 0)
goto read_failure;
- ctx->control = header[0];
+ tlm->decrypted = 0;
+ tlm->control = header[0];
data_len = ((header[4] & 0xFF) | (header[3] << 8));
@@ -2149,8 +2072,6 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb)
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- ctx->decrypted = 0;
-
ctx->recv_pkt = skb;
strp_pause(strp);
@@ -2241,7 +2162,7 @@ void tls_sw_release_resources_rx(struct sock *sk)
if (ctx->aead_recv) {
kfree_skb(ctx->recv_pkt);
ctx->recv_pkt = NULL;
- skb_queue_purge(&ctx->rx_list);
+ __skb_queue_purge(&ctx->rx_list);
crypto_free_aead(ctx->aead_recv);
strp_stop(&ctx->strp);
/* If tls_sw_strparser_arm() was not called (cleanup paths)
@@ -2501,7 +2422,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
/* Sanity-check the sizes for stack allocations. */
if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
- rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
+ rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE) {
rc = -EINVAL;
goto free_priv;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e71a312faa1e..e1dd9e9c8452 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1643,7 +1643,8 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
* so that no locks are necessary.
*/
- skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
+ skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+ &err);
if (!skb) {
/* This means receive shutdown. */
if (err == 0)
@@ -2483,8 +2484,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t si
const struct proto *prot = READ_ONCE(sk->sk_prot);
if (prot != &unix_dgram_proto)
- return prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, NULL);
+ return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
return __unix_dgram_recvmsg(sk, msg, size, flags);
}
@@ -2500,7 +2500,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
int used, err;
mutex_lock(&u->iolock);
- skb = skb_recv_datagram(sk, 0, 1, &err);
+ skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
mutex_unlock(&u->iolock);
if (!skb)
return err;
@@ -2916,8 +2916,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
const struct proto *prot = READ_ONCE(sk->sk_prot);
if (prot != &unix_stream_proto)
- return prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, NULL);
+ return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
return unix_stream_read_generic(&state, true);
}
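
Editor's aside: every skb_recv_datagram()/->recvmsg() conversion in this patch follows the same rule — the separate noblock/nonblock int argument is gone, and non-blocking behaviour now travels inside the flags word as MSG_DONTWAIT, derived from O_NONBLOCK where the caller only has file flags. A stand-alone sketch of that mapping (plain C; recv_flags() is an illustrative helper, not a kernel function):

#include <fcntl.h>
#include <sys/socket.h>
#include <stdio.h>

/* Fold a file's O_NONBLOCK into the per-call MSG_* flags. */
static int recv_flags(int file_flags, int msg_flags)
{
        if (file_flags & O_NONBLOCK)
                msg_flags |= MSG_DONTWAIT;
        return msg_flags;
}

int main(void)
{
        printf("flags=%#x\n", recv_flags(O_NONBLOCK, 0));
        return 0;
}
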
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
index 452376c6f419..7cf14c6b1725 100644
--- a/net/unix/unix_bpf.c
+++ b/net/unix/unix_bpf.c
@@ -48,8 +48,7 @@ static int __unix_recvmsg(struct sock *sk, struct msghdr *msg,
}
static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags,
- int *addr_len)
+ size_t len, int flags, int *addr_len)
{
struct unix_sock *u = unix_sk(sk);
struct sk_psock *psock;
@@ -73,7 +72,7 @@ msg_bytes_ready:
long timeo;
int data;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = unix_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index ba1c8cc0c467..ad64f403536a 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -566,67 +566,28 @@ out:
mutex_unlock(&vsock->rx_lock);
}
-static int virtio_vsock_probe(struct virtio_device *vdev)
+static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
- vq_callback_t *callbacks[] = {
- virtio_vsock_rx_done,
- virtio_vsock_tx_done,
- virtio_vsock_event_done,
- };
+ struct virtio_device *vdev = vsock->vdev;
static const char * const names[] = {
"rx",
"tx",
"event",
};
- struct virtio_vsock *vsock = NULL;
+ vq_callback_t *callbacks[] = {
+ virtio_vsock_rx_done,
+ virtio_vsock_tx_done,
+ virtio_vsock_event_done,
+ };
int ret;
- ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
- if (ret)
- return ret;
-
- /* Only one virtio-vsock device per guest is supported */
- if (rcu_dereference_protected(the_virtio_vsock,
- lockdep_is_held(&the_virtio_vsock_mutex))) {
- ret = -EBUSY;
- goto out;
- }
-
- vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
- if (!vsock) {
- ret = -ENOMEM;
- goto out;
- }
-
- vsock->vdev = vdev;
-
- ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
- vsock->vqs, callbacks, names,
+ ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
NULL);
if (ret < 0)
- goto out;
+ return ret;
virtio_vsock_update_guest_cid(vsock);
- vsock->rx_buf_nr = 0;
- vsock->rx_buf_max_nr = 0;
- atomic_set(&vsock->queued_replies, 0);
-
- mutex_init(&vsock->tx_lock);
- mutex_init(&vsock->rx_lock);
- mutex_init(&vsock->event_lock);
- spin_lock_init(&vsock->send_pkt_list_lock);
- INIT_LIST_HEAD(&vsock->send_pkt_list);
- INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
- INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
- INIT_WORK(&vsock->event_work, virtio_transport_event_work);
- INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
-
- if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
- vsock->seqpacket_allow = true;
-
- vdev->priv = vsock;
-
virtio_device_ready(vdev);
mutex_lock(&vsock->tx_lock);
@@ -643,30 +604,15 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
- rcu_assign_pointer(the_virtio_vsock, vsock);
-
- mutex_unlock(&the_virtio_vsock_mutex);
-
return 0;
-
-out:
- kfree(vsock);
- mutex_unlock(&the_virtio_vsock_mutex);
- return ret;
}
-static void virtio_vsock_remove(struct virtio_device *vdev)
+static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
- struct virtio_vsock *vsock = vdev->priv;
+ struct virtio_device *vdev = vsock->vdev;
struct virtio_vsock_pkt *pkt;
- mutex_lock(&the_virtio_vsock_mutex);
-
- vdev->priv = NULL;
- rcu_assign_pointer(the_virtio_vsock, NULL);
- synchronize_rcu();
-
- /* Reset all connected sockets when the device disappear */
+ /* Reset all connected sockets when the VQs disappear */
vsock_for_each_connected_socket(&virtio_transport.transport,
virtio_vsock_reset_sock);
@@ -711,6 +657,78 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
+}
+
+static int virtio_vsock_probe(struct virtio_device *vdev)
+{
+ struct virtio_vsock *vsock = NULL;
+ int ret;
+
+ ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
+ if (ret)
+ return ret;
+
+ /* Only one virtio-vsock device per guest is supported */
+ if (rcu_dereference_protected(the_virtio_vsock,
+ lockdep_is_held(&the_virtio_vsock_mutex))) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
+ if (!vsock) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsock->vdev = vdev;
+
+ vsock->rx_buf_nr = 0;
+ vsock->rx_buf_max_nr = 0;
+ atomic_set(&vsock->queued_replies, 0);
+
+ mutex_init(&vsock->tx_lock);
+ mutex_init(&vsock->rx_lock);
+ mutex_init(&vsock->event_lock);
+ spin_lock_init(&vsock->send_pkt_list_lock);
+ INIT_LIST_HEAD(&vsock->send_pkt_list);
+ INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
+ INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
+ INIT_WORK(&vsock->event_work, virtio_transport_event_work);
+ INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
+
+ if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
+ vsock->seqpacket_allow = true;
+
+ vdev->priv = vsock;
+
+ ret = virtio_vsock_vqs_init(vsock);
+ if (ret < 0)
+ goto out;
+
+ rcu_assign_pointer(the_virtio_vsock, vsock);
+
+ mutex_unlock(&the_virtio_vsock_mutex);
+
+ return 0;
+
+out:
+ kfree(vsock);
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return ret;
+}
+
+static void virtio_vsock_remove(struct virtio_device *vdev)
+{
+ struct virtio_vsock *vsock = vdev->priv;
+
+ mutex_lock(&the_virtio_vsock_mutex);
+
+ vdev->priv = NULL;
+ rcu_assign_pointer(the_virtio_vsock, NULL);
+ synchronize_rcu();
+
+ virtio_vsock_vqs_del(vsock);
/* Other works can be queued before 'config->del_vqs()', so we flush
* all works before to free the vsock object to avoid use after free.
@@ -725,6 +743,49 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
kfree(vsock);
}
+#ifdef CONFIG_PM_SLEEP
+static int virtio_vsock_freeze(struct virtio_device *vdev)
+{
+ struct virtio_vsock *vsock = vdev->priv;
+
+ mutex_lock(&the_virtio_vsock_mutex);
+
+ rcu_assign_pointer(the_virtio_vsock, NULL);
+ synchronize_rcu();
+
+ virtio_vsock_vqs_del(vsock);
+
+ mutex_unlock(&the_virtio_vsock_mutex);
+
+ return 0;
+}
+
+static int virtio_vsock_restore(struct virtio_device *vdev)
+{
+ struct virtio_vsock *vsock = vdev->priv;
+ int ret;
+
+ mutex_lock(&the_virtio_vsock_mutex);
+
+ /* Only one virtio-vsock device per guest is supported */
+ if (rcu_dereference_protected(the_virtio_vsock,
+ lockdep_is_held(&the_virtio_vsock_mutex))) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = virtio_vsock_vqs_init(vsock);
+ if (ret < 0)
+ goto out;
+
+ rcu_assign_pointer(the_virtio_vsock, vsock);
+
+out:
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -742,6 +803,10 @@ static struct virtio_driver virtio_vsock_driver = {
.id_table = id_table,
.probe = virtio_vsock_probe,
.remove = virtio_vsock_remove,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtio_vsock_freeze,
+ .restore = virtio_vsock_restore,
+#endif
};
static int __init virtio_vsock_init(void)
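
Editor's aside: the point of splitting probe/remove into virtio_vsock_vqs_init()/virtio_vsock_vqs_del() above is that the new PM callbacks can reuse exactly the virtqueue setup and teardown that the device-lifetime paths already exercise. Reduced to its shape as a stand-alone C sketch (all names here are illustrative, not the kernel's):

#include <stdio.h>

struct fake_dev { int vqs_ready; };

static int  vqs_init(struct fake_dev *d) { d->vqs_ready = 1; return 0; }
static void vqs_del(struct fake_dev *d)  { d->vqs_ready = 0; }

/* Device lifetime and PM paths share the same two helpers. */
static int  probe(struct fake_dev *d)    { return vqs_init(d); }
static void detach(struct fake_dev *d)   { vqs_del(d); }
static int  freeze(struct fake_dev *d)   { vqs_del(d); return 0; }
static int  restore(struct fake_dev *d)  { return vqs_init(d); }

int main(void)
{
        struct fake_dev d = { 0 };

        probe(&d);
        freeze(&d);
        restore(&d);
        detach(&d);
        printf("vqs_ready=%d\n", d.vqs_ready);
        return 0;
}
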
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index b17dc9745188..b14f0ed7427b 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1732,19 +1732,16 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
int flags)
{
int err;
- int noblock;
struct vmci_datagram *dg;
size_t payload_len;
struct sk_buff *skb;
- noblock = flags & MSG_DONTWAIT;
-
if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
return -EOPNOTSUPP;
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
- skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
+ skb = skb_recv_datagram(&vsk->sk, flags, &err);
if (!skb)
return err;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 21e808fcb676..945ed87d12e0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3710,6 +3710,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
wdev_lock(wdev);
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
if (wdev->ssid_len &&
nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
goto nla_put_failure_locked;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 3a171828638b..6bc2ac8d8146 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1315,8 +1315,7 @@ static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
} else {
/* Now we can treat all alike */
release_sock(sk);
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &rc);
+ skb = skb_recv_datagram(sk, flags, &rc);
lock_sock(sk);
if (!skb)
goto out;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3a9348030e20..e0a4526ab66b 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -184,7 +184,7 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
xsk_xdp = xsk_buff_alloc(xs->pool);
if (!xsk_xdp) {
xs->rx_dropped++;
- return -ENOSPC;
+ return -ENOMEM;
}
xsk_copy_xdp(xsk_xdp, xdp, len);
@@ -217,7 +217,7 @@ static bool xsk_is_bound(struct xdp_sock *xs)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
if (!xsk_is_bound(xs))
- return -EINVAL;
+ return -ENXIO;
if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
return -EINVAL;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 801cda5d1938..a794410989cc 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -263,7 +263,7 @@ static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
- return xskq_cons_nb_entries(q, cnt) >= cnt ? true : false;
+ return xskq_cons_nb_entries(q, cnt) >= cnt;
}
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
@@ -382,7 +382,7 @@ static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
u32 idx;
if (xskq_prod_is_full(q))
- return -ENOSPC;
+ return -ENOBUFS;
/* A, matches D */
idx = q->cached_prod++ & q->ring_mask;
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 65b53fb3de13..acc8e52a4f5f 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -9,6 +9,7 @@
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/btf_ids.h>
#include "xsk.h"
@@ -254,7 +255,7 @@ static bool xsk_map_meta_equal(const struct bpf_map *meta0,
bpf_map_meta_equal(meta0, meta1);
}
-static int xsk_map_btf_id;
+BTF_ID_LIST_SINGLE(xsk_map_btf_ids, struct, xsk_map)
const struct bpf_map_ops xsk_map_ops = {
.map_meta_equal = xsk_map_meta_equal,
.map_alloc = xsk_map_alloc,
@@ -266,7 +267,6 @@ const struct bpf_map_ops xsk_map_ops = {
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
.map_check_btf = map_check_no_btf,
- .map_btf_name = "xsk_map",
- .map_btf_id = &xsk_map_btf_id,
+ .map_btf_id = &xsk_map_btf_ids[0],
.map_redirect = xsk_map_redirect,
};
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index 1f08ebf7d80c..82d14eea1b5a 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -131,7 +131,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb)
}
static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct sk_buff *skb;
@@ -139,8 +139,6 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int copied;
int off = 0;
- flags |= nonblock ? MSG_DONTWAIT : 0;
-
skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
if (!skb) {
if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN)
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 38638845db9d..8fff5ad3444b 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -96,7 +96,6 @@ test_cgrp2_sock2-objs := test_cgrp2_sock2.o
xdp1-objs := xdp1_user.o
# reuse xdp1 source intentionally
xdp2-objs := xdp1_user.o
-xdp_router_ipv4-objs := xdp_router_ipv4_user.o
test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
test_current_task_under_cgroup_user.o
trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
@@ -124,6 +123,7 @@ xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o $(XDP_SAMPLE)
xdp_redirect_map-objs := xdp_redirect_map_user.o $(XDP_SAMPLE)
xdp_redirect-objs := xdp_redirect_user.o $(XDP_SAMPLE)
xdp_monitor-objs := xdp_monitor_user.o $(XDP_SAMPLE)
+xdp_router_ipv4-objs := xdp_router_ipv4_user.o $(XDP_SAMPLE)
# Tell kbuild to always build the programs
always-y := $(tprogs-y)
@@ -153,7 +153,6 @@ always-y += parse_varlen.o parse_simple.o parse_ldabs.o
always-y += test_cgrp2_tc_kern.o
always-y += xdp1_kern.o
always-y += xdp2_kern.o
-always-y += xdp_router_ipv4_kern.o
always-y += test_current_task_under_cgroup_kern.o
always-y += trace_event_kern.o
always-y += sampleip_kern.o
@@ -220,6 +219,7 @@ TPROGLDLIBS_xdp_redirect += -lm
TPROGLDLIBS_xdp_redirect_cpu += -lm
TPROGLDLIBS_xdp_redirect_map += -lm
TPROGLDLIBS_xdp_redirect_map_multi += -lm
+TPROGLDLIBS_xdp_router_ipv4 += -lm -pthread
TPROGLDLIBS_tracex4 += -lrt
TPROGLDLIBS_trace_output += -lrt
TPROGLDLIBS_map_perf_test += -lrt
@@ -342,6 +342,7 @@ $(obj)/xdp_redirect_map_multi_user.o: $(obj)/xdp_redirect_map_multi.skel.h
$(obj)/xdp_redirect_map_user.o: $(obj)/xdp_redirect_map.skel.h
$(obj)/xdp_redirect_user.o: $(obj)/xdp_redirect.skel.h
$(obj)/xdp_monitor_user.o: $(obj)/xdp_monitor.skel.h
+$(obj)/xdp_router_ipv4_user.o: $(obj)/xdp_router_ipv4.skel.h
$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
$(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
@@ -399,6 +400,7 @@ $(obj)/xdp_redirect_map_multi.bpf.o: $(obj)/xdp_sample.bpf.o
$(obj)/xdp_redirect_map.bpf.o: $(obj)/xdp_sample.bpf.o
$(obj)/xdp_redirect.bpf.o: $(obj)/xdp_sample.bpf.o
$(obj)/xdp_monitor.bpf.o: $(obj)/xdp_sample.bpf.o
+$(obj)/xdp_router_ipv4.bpf.o: $(obj)/xdp_sample.bpf.o
$(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/xdp_sample_shared.h
@echo " CLANG-BPF " $@
@@ -409,7 +411,8 @@ $(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/x
-c $(filter %.bpf.c,$^) -o $@
LINKED_SKELS := xdp_redirect_cpu.skel.h xdp_redirect_map_multi.skel.h \
- xdp_redirect_map.skel.h xdp_redirect.skel.h xdp_monitor.skel.h
+ xdp_redirect_map.skel.h xdp_redirect.skel.h xdp_monitor.skel.h \
+ xdp_router_ipv4.skel.h
clean-files += $(LINKED_SKELS)
xdp_redirect_cpu.skel.h-deps := xdp_redirect_cpu.bpf.o xdp_sample.bpf.o
@@ -417,6 +420,7 @@ xdp_redirect_map_multi.skel.h-deps := xdp_redirect_map_multi.bpf.o xdp_sample.bp
xdp_redirect_map.skel.h-deps := xdp_redirect_map.bpf.o xdp_sample.bpf.o
xdp_redirect.skel.h-deps := xdp_redirect.bpf.o xdp_sample.bpf.o
xdp_monitor.skel.h-deps := xdp_monitor.bpf.o xdp_sample.bpf.o
+xdp_router_ipv4.skel.h-deps := xdp_router_ipv4.bpf.o xdp_sample.bpf.o
LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.bpf.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
diff --git a/samples/bpf/cpustat_user.c b/samples/bpf/cpustat_user.c
index 96675985e9e0..ab90bb08a2b4 100644
--- a/samples/bpf/cpustat_user.c
+++ b/samples/bpf/cpustat_user.c
@@ -13,7 +13,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <sys/wait.h>
#include <bpf/bpf.h>
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index 1fe5bcafb3bc..516fbac28b71 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -34,7 +34,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
-#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#include <errno.h>
@@ -46,7 +45,6 @@
#include <bpf/bpf.h>
#include <getopt.h>
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
#include "hbm.h"
#include "bpf_util.h"
@@ -510,5 +508,8 @@ int main(int argc, char **argv)
prog = argv[optind];
printf("HBM prog: %s\n", prog != NULL ? prog : "NULL");
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
return run_bpf_prog(prog, cg_id);
}
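
Editor's aside: hbm.c is representative of the sample-wide cleanup in this patch — the hand-rolled RLIMIT_MEMLOCK bump (bpf_rlimit.h / <sys/resource.h>) is dropped in favour of libbpf's 1.0 API mode, which takes care of the memlock limit where the kernel still needs it. A minimal sketch of the new-style preamble, assuming a libbpf recent enough to provide libbpf_set_strict_mode():

#include <bpf/libbpf.h>

int main(void)
{
        /* Replaces the old setrlimit(RLIMIT_MEMLOCK, ...) boilerplate. */
        libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

        /* ... bpf_object__open()/load() or skeleton usage as before ... */
        return 0;
}
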
diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
index 0746ca516097..d074c978aac7 100644
--- a/samples/bpf/ibumad_user.c
+++ b/samples/bpf/ibumad_user.c
@@ -19,7 +19,6 @@
#include <sys/types.h>
#include <limits.h>
-#include <sys/resource.h>
#include <getopt.h>
#include <net/if.h>
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index e69651a6902f..b6fc174ab1f2 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -13,7 +13,6 @@
#include <signal.h>
#include <string.h>
#include <time.h>
-#include <sys/resource.h>
#include <arpa/inet.h>
#include <errno.h>
diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c
index 73a986876c1a..b6eedcb98fb9 100644
--- a/samples/bpf/offwaketime_user.c
+++ b/samples/bpf/offwaketime_user.c
@@ -8,7 +8,6 @@
#include <linux/perf_event.h>
#include <errno.h>
#include <stdbool.h>
-#include <sys/resource.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "trace_helpers.h"
diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c
index 6a3fd369d3fc..2c18471336f0 100644
--- a/samples/bpf/sockex2_user.c
+++ b/samples/bpf/sockex2_user.c
@@ -7,7 +7,6 @@
#include "sock_example.h"
#include <unistd.h>
#include <arpa/inet.h>
-#include <sys/resource.h>
struct pair {
__u64 packets;
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 6ae99ecc766c..cd6fa79df900 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -6,7 +6,6 @@
#include "sock_example.h"
#include <unistd.h>
#include <arpa/inet.h>
-#include <sys/resource.h>
struct flow_key_record {
__be32 src;
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
index 0d7e1e5a8658..aadac14f748a 100644
--- a/samples/bpf/spintest_user.c
+++ b/samples/bpf/spintest_user.c
@@ -3,7 +3,6 @@
#include <unistd.h>
#include <string.h>
#include <assert.h>
-#include <sys/resource.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "trace_helpers.h"
diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
index a0ebf1833ed3..7a788bb837fc 100644
--- a/samples/bpf/syscall_tp_user.c
+++ b/samples/bpf/syscall_tp_user.c
@@ -8,7 +8,6 @@
#include <string.h>
#include <linux/perf_event.h>
#include <errno.h>
-#include <sys/resource.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
@@ -36,6 +35,9 @@ static void verify_map(int map_id)
fprintf(stderr, "failed: map #%d returns value 0\n", map_id);
return;
}
+
+ printf("verify map:%d val: %d\n", map_id, val);
+
val = 0;
if (bpf_map_update_elem(map_id, &key, &val, BPF_ANY) != 0) {
fprintf(stderr, "map_update failed: %s\n", strerror(errno));
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index c9a0ca8351fd..424718c0872c 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -10,7 +10,6 @@
#include <fcntl.h>
#include <linux/bpf.h>
#include <sys/ioctl.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/perf_event.h>
diff --git a/samples/bpf/test_lru_dist.c b/samples/bpf/test_lru_dist.c
index 75e877853596..be98ccb4952f 100644
--- a/samples/bpf/test_lru_dist.c
+++ b/samples/bpf/test_lru_dist.c
@@ -13,7 +13,6 @@
#include <sched.h>
#include <sys/wait.h>
#include <sys/stat.h>
-#include <sys/resource.h>
#include <fcntl.h>
#include <stdlib.h>
#include <time.h>
diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c
index 472d65c70354..e8b4cc184ac9 100644
--- a/samples/bpf/test_map_in_map_user.c
+++ b/samples/bpf/test_map_in_map_user.c
@@ -2,7 +2,6 @@
/*
* Copyright (c) 2017 Facebook
*/
-#include <sys/resource.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdint.h>
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index 4821f9d99c1f..88717f8ec6ac 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -16,7 +16,6 @@
#include <linux/bpf.h>
#include <string.h>
#include <time.h>
-#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
index 1626d51dfffd..dd6205c6b6a7 100644
--- a/samples/bpf/tracex2_user.c
+++ b/samples/bpf/tracex2_user.c
@@ -4,7 +4,6 @@
#include <stdlib.h>
#include <signal.h>
#include <string.h>
-#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c
index 33e16ba39f25..d5eebace31e6 100644
--- a/samples/bpf/tracex3_user.c
+++ b/samples/bpf/tracex3_user.c
@@ -7,7 +7,6 @@
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
-#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
diff --git a/samples/bpf/tracex4_user.c b/samples/bpf/tracex4_user.c
index 566e6440e8c2..227b05a0bc88 100644
--- a/samples/bpf/tracex4_user.c
+++ b/samples/bpf/tracex4_user.c
@@ -8,7 +8,6 @@
#include <stdbool.h>
#include <string.h>
#include <time.h>
-#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index 08dfdc77ad2a..e910dc265c31 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -7,7 +7,6 @@
#include <sys/prctl.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include <sys/resource.h>
#include "trace_helpers.h"
#ifdef __mips__
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
index 28296f40c133..8e83bf2a84a4 100644
--- a/samples/bpf/tracex6_user.c
+++ b/samples/bpf/tracex6_user.c
@@ -8,7 +8,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
-#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index 631f0cabe139..ac370e638fa3 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -11,7 +11,6 @@
#include <string.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <net/if.h>
#include "bpf_util.h"
@@ -161,7 +160,7 @@ int main(int argc, char **argv)
}
prog_id = info.id;
- poll_stats(map_fd, 2);
+ poll_stats(map_fd, 1);
return 0;
}
diff --git a/samples/bpf/xdp_adjust_tail_user.c b/samples/bpf/xdp_adjust_tail_user.c
index b3f6e49676ed..167646077c8f 100644
--- a/samples/bpf/xdp_adjust_tail_user.c
+++ b/samples/bpf/xdp_adjust_tail_user.c
@@ -14,7 +14,6 @@
#include <stdlib.h>
#include <string.h>
#include <net/if.h>
-#include <sys/resource.h>
#include <arpa/inet.h>
#include <netinet/ether.h>
#include <unistd.h>
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index fb9391a5ec62..58015eb2ffae 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -17,7 +17,6 @@ static const char *__doc_err_only__=
#include <ctype.h>
#include <unistd.h>
#include <locale.h>
-#include <sys/resource.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index 5f74a70a9021..a12381c37d2b 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -21,7 +21,6 @@ static const char *__doc__ =
#include <string.h>
#include <unistd.h>
#include <locale.h>
-#include <sys/resource.h>
#include <sys/sysinfo.h>
#include <getopt.h>
#include <net/if.h>
diff --git a/samples/bpf/xdp_redirect_map_multi_user.c b/samples/bpf/xdp_redirect_map_multi_user.c
index 315314716121..9e24f2705b67 100644
--- a/samples/bpf/xdp_redirect_map_multi_user.c
+++ b/samples/bpf/xdp_redirect_map_multi_user.c
@@ -15,7 +15,6 @@ static const char *__doc__ =
#include <net/if.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
index 7af5b07a7523..8663dd631b6e 100644
--- a/samples/bpf/xdp_redirect_user.c
+++ b/samples/bpf/xdp_redirect_user.c
@@ -18,7 +18,6 @@ static const char *__doc__ =
#include <unistd.h>
#include <libgen.h>
#include <getopt.h>
-#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
diff --git a/samples/bpf/xdp_router_ipv4.bpf.c b/samples/bpf/xdp_router_ipv4.bpf.c
new file mode 100644
index 000000000000..248119ca7938
--- /dev/null
+++ b/samples/bpf/xdp_router_ipv4.bpf.c
@@ -0,0 +1,180 @@
+/* Copyright (C) 2017 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include "vmlinux.h"
+#include "xdp_sample.bpf.h"
+#include "xdp_sample_shared.h"
+
+#define ETH_ALEN 6
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8
+
+struct trie_value {
+ __u8 prefix[4];
+ __be64 value;
+ int ifindex;
+ int metric;
+ __be32 gw;
+};
+
+/* Key for lpm_trie */
+union key_4 {
+ u32 b32[2];
+ u8 b8[8];
+};
+
+struct arp_entry {
+ __be64 mac;
+ __be32 dst;
+};
+
+struct direct_map {
+ struct arp_entry arp;
+ int ifindex;
+ __be64 mac;
+};
+
+/* Map for trie implementation */
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __uint(key_size, 8);
+ __uint(value_size, sizeof(struct trie_value));
+ __uint(max_entries, 50);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} lpm_map SEC(".maps");
+
+/* Map for ARP table */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, __be64);
+ __uint(max_entries, 50);
+} arp_table SEC(".maps");
+
+/* Map to keep the exact match entries in the route table */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, struct direct_map);
+ __uint(max_entries, 50);
+} exact_match SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 100);
+} tx_port SEC(".maps");
+
+SEC("xdp")
+int xdp_router_ipv4_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ u64 nh_off = sizeof(*eth);
+ struct datarec *rec;
+ __be16 h_proto;
+ u32 key = 0;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (rec)
+ NO_TEAR_INC(rec->processed);
+
+ if (data + nh_off > data_end)
+ goto drop;
+
+ h_proto = eth->h_proto;
+ if (h_proto == bpf_htons(ETH_P_8021Q) ||
+ h_proto == bpf_htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ goto drop;
+
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+
+ switch (bpf_ntohs(h_proto)) {
+ case ETH_P_ARP:
+ if (rec)
+ NO_TEAR_INC(rec->xdp_pass);
+ return XDP_PASS;
+ case ETH_P_IP: {
+ struct iphdr *iph = data + nh_off;
+ struct direct_map *direct_entry;
+ __be64 *dest_mac, *src_mac;
+ int forward_to;
+
+ if (iph + 1 > data_end)
+ goto drop;
+
+ direct_entry = bpf_map_lookup_elem(&exact_match, &iph->daddr);
+
+ /* Check for exact match, this would give a faster lookup */
+ if (direct_entry && direct_entry->mac &&
+ direct_entry->arp.mac) {
+ src_mac = &direct_entry->mac;
+ dest_mac = &direct_entry->arp.mac;
+ forward_to = direct_entry->ifindex;
+ } else {
+ struct trie_value *prefix_value;
+ union key_4 key4;
+
+ /* Look up in the trie for lpm */
+ key4.b32[0] = 32;
+ key4.b8[4] = iph->daddr & 0xff;
+ key4.b8[5] = (iph->daddr >> 8) & 0xff;
+ key4.b8[6] = (iph->daddr >> 16) & 0xff;
+ key4.b8[7] = (iph->daddr >> 24) & 0xff;
+
+ prefix_value = bpf_map_lookup_elem(&lpm_map, &key4);
+ if (!prefix_value)
+ goto drop;
+
+ forward_to = prefix_value->ifindex;
+ src_mac = &prefix_value->value;
+ if (!src_mac)
+ goto drop;
+
+ dest_mac = bpf_map_lookup_elem(&arp_table, &iph->daddr);
+ if (!dest_mac) {
+ if (!prefix_value->gw)
+ goto drop;
+
+ dest_mac = bpf_map_lookup_elem(&arp_table,
+ &prefix_value->gw);
+ }
+ }
+
+ if (src_mac && dest_mac) {
+ int ret;
+
+ __builtin_memcpy(eth->h_dest, dest_mac, ETH_ALEN);
+ __builtin_memcpy(eth->h_source, src_mac, ETH_ALEN);
+
+ ret = bpf_redirect_map(&tx_port, forward_to, 0);
+ if (ret == XDP_REDIRECT) {
+ if (rec)
+ NO_TEAR_INC(rec->xdp_redirect);
+ return ret;
+ }
+ }
+ }
+ default:
+ break;
+ }
+drop:
+ if (rec)
+ NO_TEAR_INC(rec->xdp_drop);
+
+ return XDP_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
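
Editor's aside: with the Makefile changes earlier in this patch, the object above is linked with xdp_sample.bpf.o and turned into xdp_router_ipv4.skel.h. The rewritten xdp_router_ipv4_user.c (below) drives it through the shared xdp_sample helpers; for orientation only, a bare-bones loader using just the generated skeleton and libbpf's XDP attach API might look roughly like this. The xdp_router_ipv4__*() names assume bpftool's usual skeleton naming, and error handling is trimmed.

#include <net/if.h>
#include <stdio.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>
#include "xdp_router_ipv4.skel.h"

int main(int argc, char **argv)
{
        struct xdp_router_ipv4 *skel;
        int ifindex, err;

        if (argc < 2 || !(ifindex = if_nametoindex(argv[1]))) {
                fprintf(stderr, "usage: %s IFNAME\n", argv[0]);
                return 1;
        }

        skel = xdp_router_ipv4__open_and_load();
        if (!skel)
                return 1;

        /* Attach the "xdp" program from the skeleton to the interface. */
        err = bpf_xdp_attach(ifindex,
                             bpf_program__fd(skel->progs.xdp_router_ipv4_prog),
                             XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
        if (!err) {
                /* ... fill lpm_map/arp_table from netlink, then poll stats ... */
                bpf_xdp_detach(ifindex, XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
        }

        xdp_router_ipv4__destroy(skel);
        return err ? 1 : 0;
}
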
diff --git a/samples/bpf/xdp_router_ipv4_kern.c b/samples/bpf/xdp_router_ipv4_kern.c
deleted file mode 100644
index b37ca2b13063..000000000000
--- a/samples/bpf/xdp_router_ipv4_kern.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* Copyright (C) 2017 Cavium, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-#define KBUILD_MODNAME "foo"
-#include <uapi/linux/bpf.h>
-#include <linux/in.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <bpf/bpf_helpers.h>
-#include <linux/slab.h>
-#include <net/ip_fib.h>
-
-struct trie_value {
- __u8 prefix[4];
- __be64 value;
- int ifindex;
- int metric;
- __be32 gw;
-};
-
-/* Key for lpm_trie*/
-union key_4 {
- u32 b32[2];
- u8 b8[8];
-};
-
-struct arp_entry {
- __be64 mac;
- __be32 dst;
-};
-
-struct direct_map {
- struct arp_entry arp;
- int ifindex;
- __be64 mac;
-};
-
-/* Map for trie implementation*/
-struct {
- __uint(type, BPF_MAP_TYPE_LPM_TRIE);
- __uint(key_size, 8);
- __uint(value_size, sizeof(struct trie_value));
- __uint(max_entries, 50);
- __uint(map_flags, BPF_F_NO_PREALLOC);
-} lpm_map SEC(".maps");
-
-/* Map for counter*/
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, u64);
- __uint(max_entries, 256);
-} rxcnt SEC(".maps");
-
-/* Map for ARP table*/
-struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __type(key, __be32);
- __type(value, __be64);
- __uint(max_entries, 50);
-} arp_table SEC(".maps");
-
-/* Map to keep the exact match entries in the route table*/
-struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __type(key, __be32);
- __type(value, struct direct_map);
- __uint(max_entries, 50);
-} exact_match SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_DEVMAP);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
- __uint(max_entries, 100);
-} tx_port SEC(".maps");
-
-/* Function to set source and destination mac of the packet */
-static inline void set_src_dst_mac(void *data, void *src, void *dst)
-{
- unsigned short *source = src;
- unsigned short *dest = dst;
- unsigned short *p = data;
-
- __builtin_memcpy(p, dest, 6);
- __builtin_memcpy(p + 3, source, 6);
-}
-
-/* Parse IPV4 packet to get SRC, DST IP and protocol */
-static inline int parse_ipv4(void *data, u64 nh_off, void *data_end,
- __be32 *src, __be32 *dest)
-{
- struct iphdr *iph = data + nh_off;
-
- if (iph + 1 > data_end)
- return 0;
- *src = iph->saddr;
- *dest = iph->daddr;
- return iph->protocol;
-}
-
-SEC("xdp_router_ipv4")
-int xdp_router_ipv4_prog(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- __be64 *dest_mac = NULL, *src_mac = NULL;
- void *data = (void *)(long)ctx->data;
- struct trie_value *prefix_value;
- int rc = XDP_DROP, forward_to;
- struct ethhdr *eth = data;
- union key_4 key4;
- long *value;
- u16 h_proto;
- u32 ipproto;
- u64 nh_off;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return rc;
-
- h_proto = eth->h_proto;
-
- if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
- struct vlan_hdr *vhdr;
-
- vhdr = data + nh_off;
- nh_off += sizeof(struct vlan_hdr);
- if (data + nh_off > data_end)
- return rc;
- h_proto = vhdr->h_vlan_encapsulated_proto;
- }
- if (h_proto == htons(ETH_P_ARP)) {
- return XDP_PASS;
- } else if (h_proto == htons(ETH_P_IP)) {
- struct direct_map *direct_entry;
- __be32 src_ip = 0, dest_ip = 0;
-
- ipproto = parse_ipv4(data, nh_off, data_end, &src_ip, &dest_ip);
- direct_entry = bpf_map_lookup_elem(&exact_match, &dest_ip);
- /* Check for exact match, this would give a faster lookup*/
- if (direct_entry && direct_entry->mac && direct_entry->arp.mac) {
- src_mac = &direct_entry->mac;
- dest_mac = &direct_entry->arp.mac;
- forward_to = direct_entry->ifindex;
- } else {
- /* Look up in the trie for lpm*/
- key4.b32[0] = 32;
- key4.b8[4] = dest_ip & 0xff;
- key4.b8[5] = (dest_ip >> 8) & 0xff;
- key4.b8[6] = (dest_ip >> 16) & 0xff;
- key4.b8[7] = (dest_ip >> 24) & 0xff;
- prefix_value = bpf_map_lookup_elem(&lpm_map, &key4);
- if (!prefix_value)
- return XDP_DROP;
- src_mac = &prefix_value->value;
- if (!src_mac)
- return XDP_DROP;
- dest_mac = bpf_map_lookup_elem(&arp_table, &dest_ip);
- if (!dest_mac) {
- if (!prefix_value->gw)
- return XDP_DROP;
- dest_ip = prefix_value->gw;
- dest_mac = bpf_map_lookup_elem(&arp_table, &dest_ip);
- }
- forward_to = prefix_value->ifindex;
- }
- } else {
- ipproto = 0;
- }
- if (src_mac && dest_mac) {
- set_src_dst_mac(data, src_mac, dest_mac);
- value = bpf_map_lookup_elem(&rxcnt, &ipproto);
- if (value)
- *value += 1;
- return bpf_redirect_map(&tx_port, forward_to, 0);
- }
- return rc;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
index 6dae87d83e1c..294fc15ad1cb 100644
--- a/samples/bpf/xdp_router_ipv4_user.c
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -22,72 +22,41 @@
#include <sys/syscall.h>
#include "bpf_util.h"
#include <bpf/libbpf.h>
-#include <sys/resource.h>
#include <libgen.h>
+#include <getopt.h>
+#include <pthread.h>
+#include "xdp_sample_user.h"
+#include "xdp_router_ipv4.skel.h"
-int sock, sock_arp, flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-static int total_ifindex;
-static int *ifindex_list;
-static __u32 *prog_id_list;
-char buf[8192];
+static const char *__doc__ =
+"XDP IPv4 router implementation\n"
+"Usage: xdp_router_ipv4 <IFNAME-0> ... <IFNAME-N>\n";
+
+static char buf[8192];
static int lpm_map_fd;
-static int rxcnt_map_fd;
static int arp_table_map_fd;
static int exact_match_map_fd;
static int tx_port_map_fd;
-static int get_route_table(int rtm_family);
-static void int_exit(int sig)
-{
- __u32 prog_id = 0;
- int i = 0;
+static bool routes_thread_exit;
+static int interval = 5;
- for (i = 0; i < total_ifindex; i++) {
- if (bpf_xdp_query_id(ifindex_list[i], flags, &prog_id)) {
- printf("bpf_xdp_query_id on iface %d failed\n",
- ifindex_list[i]);
- exit(1);
- }
- if (prog_id_list[i] == prog_id)
- bpf_xdp_detach(ifindex_list[i], flags, NULL);
- else if (!prog_id)
- printf("couldn't find a prog id on iface %d\n",
- ifindex_list[i]);
- else
- printf("program on iface %d changed, not removing\n",
- ifindex_list[i]);
- prog_id = 0;
- }
- exit(0);
-}
+static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
+ SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_EXCEPTION_CNT;
-static void close_and_exit(int sig)
-{
- close(sock);
- close(sock_arp);
+DEFINE_SAMPLE_INIT(xdp_router_ipv4);
- int_exit(0);
-}
+static const struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "skb-mode", no_argument, NULL, 'S' },
+ { "force", no_argument, NULL, 'F' },
+ { "interval", required_argument, NULL, 'i' },
+ { "verbose", no_argument, NULL, 'v' },
+ { "stats", no_argument, NULL, 's' },
+ {}
+};
-/* Get the mac address of the interface given interface name */
-static __be64 getmac(char *iface)
-{
- struct ifreq ifr;
- __be64 mac = 0;
- int fd, i;
-
- fd = socket(AF_INET, SOCK_DGRAM, 0);
- ifr.ifr_addr.sa_family = AF_INET;
- strncpy(ifr.ifr_name, iface, IFNAMSIZ - 1);
- if (ioctl(fd, SIOCGIFHWADDR, &ifr) < 0) {
- printf("ioctl failed leaving....\n");
- return -1;
- }
- for (i = 0; i < 6 ; i++)
- *((__u8 *)&mac + i) = (__u8)ifr.ifr_hwaddr.sa_data[i];
- close(fd);
- return mac;
-}
+static int get_route_table(int rtm_family);
static int recv_msg(struct sockaddr_nl sock_addr, int sock)
{
@@ -130,7 +99,6 @@ static void read_route(struct nlmsghdr *nh, int nll)
int i;
struct route_table {
int dst_len, iface, metric;
- char *iface_name;
__be32 dst, gw;
__be64 mac;
} route;
@@ -145,17 +113,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
__be64 mac;
} direct_entry;
- if (nh->nlmsg_type == RTM_DELROUTE)
- printf("DELETING Route entry\n");
- else if (nh->nlmsg_type == RTM_GETROUTE)
- printf("READING Route entry\n");
- else if (nh->nlmsg_type == RTM_NEWROUTE)
- printf("NEW Route entry\n");
- else
- printf("%d\n", nh->nlmsg_type);
-
memset(&route, 0, sizeof(route));
- printf("Destination Gateway Genmask Metric Iface\n");
for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
rt_msg = (struct rtmsg *)NLMSG_DATA(nh);
rtm_family = rt_msg->rtm_family;
@@ -192,11 +150,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
route.gw = atoi(gws);
route.iface = atoi(ifs);
route.metric = atoi(metrics);
- route.iface_name = alloca(sizeof(char *) * IFNAMSIZ);
- route.iface_name = if_indextoname(route.iface, route.iface_name);
- route.mac = getmac(route.iface_name);
- if (route.mac == -1)
- int_exit(0);
+ assert(get_mac_addr(route.iface, &route.mac) == 0);
assert(bpf_map_update_elem(tx_port_map_fd,
&route.iface, &route.iface, 0) == 0);
if (rtm_family == AF_INET) {
@@ -207,7 +161,6 @@ static void read_route(struct nlmsghdr *nh, int nll)
int metric;
__be32 gw;
} *prefix_value;
- struct in_addr dst_addr, gw_addr, mask_addr;
prefix_key = alloca(sizeof(*prefix_key) + 3);
prefix_value = alloca(sizeof(*prefix_value));
@@ -235,17 +188,6 @@ static void read_route(struct nlmsghdr *nh, int nll)
for (i = 0; i < 4; i++)
prefix_key->data[i] = (route.dst >> i * 8) & 0xff;
- dst_addr.s_addr = route.dst;
- printf("%-16s", inet_ntoa(dst_addr));
-
- gw_addr.s_addr = route.gw;
- printf("%-16s", inet_ntoa(gw_addr));
-
- mask_addr.s_addr = htonl(~(0xffffffffU >> route.dst_len));
- printf("%-16s%-7d%s\n", inet_ntoa(mask_addr),
- route.metric,
- route.iface_name);
-
if (bpf_map_lookup_elem(lpm_map_fd, prefix_key,
prefix_value) < 0) {
for (i = 0; i < 4; i++)
@@ -261,13 +203,6 @@ static void read_route(struct nlmsghdr *nh, int nll)
) == 0);
} else {
if (nh->nlmsg_type == RTM_DELROUTE) {
- printf("deleting entry\n");
- printf("prefix key=%d.%d.%d.%d/%d",
- prefix_key->data[0],
- prefix_key->data[1],
- prefix_key->data[2],
- prefix_key->data[3],
- prefix_key->prefixlen);
assert(bpf_map_delete_elem(lpm_map_fd,
prefix_key
) == 0);
@@ -331,14 +266,14 @@ static int get_route_table(int rtm_family)
sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock < 0) {
- printf("open netlink socket: %s\n", strerror(errno));
- return -1;
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ return -errno;
}
memset(&sa, 0, sizeof(sa));
sa.nl_family = AF_NETLINK;
if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
- printf("bind to netlink: %s\n", strerror(errno));
- ret = -1;
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
+ ret = -errno;
goto cleanup;
}
memset(&req, 0, sizeof(req));
@@ -357,15 +292,15 @@ static int get_route_table(int rtm_family)
msg.msg_iovlen = 1;
ret = sendmsg(sock, &msg, 0);
if (ret < 0) {
- printf("send to netlink: %s\n", strerror(errno));
- ret = -1;
+ fprintf(stderr, "send to netlink: %s\n", strerror(errno));
+ ret = -errno;
goto cleanup;
}
memset(buf, 0, sizeof(buf));
nll = recv_msg(sa, sock);
if (nll < 0) {
- printf("recv from netlink: %s\n", strerror(nll));
- ret = -1;
+ fprintf(stderr, "recv from netlink: %s\n", strerror(nll));
+ ret = nll;
goto cleanup;
}
nh = (struct nlmsghdr *)buf;
@@ -395,14 +330,7 @@ static void read_arp(struct nlmsghdr *nh, int nll)
__be64 mac;
} direct_entry;
- if (nh->nlmsg_type == RTM_GETNEIGH)
- printf("READING arp entry\n");
- printf("Address HwAddress\n");
for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
- struct in_addr dst_addr;
- char mac_str[18];
- int len = 0, i;
-
rt_msg = (struct ndmsg *)NLMSG_DATA(nh);
rt_attr = (struct rtattr *)RTM_RTA(rt_msg);
ndm_family = rt_msg->ndm_family;
@@ -424,13 +352,6 @@ static void read_arp(struct nlmsghdr *nh, int nll)
arp_entry.dst = atoi(dsts);
arp_entry.mac = atol(mac);
- dst_addr.s_addr = arp_entry.dst;
- for (i = 0; i < 6; i++)
- len += snprintf(mac_str + len, 18 - len, "%02llx%s",
- ((arp_entry.mac >> i * 8) & 0xff),
- i < 5 ? ":" : "");
- printf("%-16s%s\n", inet_ntoa(dst_addr), mac_str);
-
if (ndm_family == AF_INET) {
if (bpf_map_lookup_elem(exact_match_map_fd,
&arp_entry.dst,
@@ -481,14 +402,14 @@ static int get_arp_table(int rtm_family)
sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock < 0) {
- printf("open netlink socket: %s\n", strerror(errno));
- return -1;
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ return -errno;
}
memset(&sa, 0, sizeof(sa));
sa.nl_family = AF_NETLINK;
if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
- printf("bind to netlink: %s\n", strerror(errno));
- ret = -1;
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
+ ret = -errno;
goto cleanup;
}
memset(&req, 0, sizeof(req));
@@ -506,15 +427,15 @@ static int get_arp_table(int rtm_family)
msg.msg_iovlen = 1;
ret = sendmsg(sock, &msg, 0);
if (ret < 0) {
- printf("send to netlink: %s\n", strerror(errno));
- ret = -1;
+ fprintf(stderr, "send to netlink: %s\n", strerror(errno));
+ ret = -errno;
goto cleanup;
}
memset(buf, 0, sizeof(buf));
nll = recv_msg(sa, sock);
if (nll < 0) {
- printf("recv from netlink: %s\n", strerror(nll));
- ret = -1;
+ fprintf(stderr, "recv from netlink: %s\n", strerror(nll));
+ ret = nll;
goto cleanup;
}
nh = (struct nlmsghdr *)buf;
@@ -527,24 +448,17 @@ cleanup:
/* Function to keep track and update changes in route and arp table
* Give regular statistics of packets forwarded
*/
-static int monitor_route(void)
+static void *monitor_routes_thread(void *arg)
{
- unsigned int nr_cpus = bpf_num_possible_cpus();
- const unsigned int nr_keys = 256;
struct pollfd fds_route, fds_arp;
- __u64 prev[nr_keys][nr_cpus];
struct sockaddr_nl la, lr;
- __u64 values[nr_cpus];
+ int sock, sock_arp, nll;
struct nlmsghdr *nh;
- int nll, ret = 0;
- int interval = 5;
- __u32 key;
- int i;
sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock < 0) {
- printf("open netlink socket: %s\n", strerror(errno));
- return -1;
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ return NULL;
}
fcntl(sock, F_SETFL, O_NONBLOCK);
@@ -552,17 +466,19 @@ static int monitor_route(void)
lr.nl_family = AF_NETLINK;
lr.nl_groups = RTMGRP_IPV6_ROUTE | RTMGRP_IPV4_ROUTE | RTMGRP_NOTIFY;
if (bind(sock, (struct sockaddr *)&lr, sizeof(lr)) < 0) {
- printf("bind to netlink: %s\n", strerror(errno));
- ret = -1;
- goto cleanup;
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
+ close(sock);
+ return NULL;
}
+
fds_route.fd = sock;
fds_route.events = POLL_IN;
sock_arp = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock_arp < 0) {
- printf("open netlink socket: %s\n", strerror(errno));
- return -1;
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ close(sock);
+ return NULL;
}
fcntl(sock_arp, F_SETFL, O_NONBLOCK);
@@ -570,51 +486,44 @@ static int monitor_route(void)
la.nl_family = AF_NETLINK;
la.nl_groups = RTMGRP_NEIGH | RTMGRP_NOTIFY;
if (bind(sock_arp, (struct sockaddr *)&la, sizeof(la)) < 0) {
- printf("bind to netlink: %s\n", strerror(errno));
- ret = -1;
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
goto cleanup;
}
+
fds_arp.fd = sock_arp;
fds_arp.events = POLL_IN;
- memset(prev, 0, sizeof(prev));
- do {
- signal(SIGINT, close_and_exit);
- signal(SIGTERM, close_and_exit);
+ /* dump route and arp tables */
+ if (get_arp_table(AF_INET) < 0) {
+ fprintf(stderr, "Failed reading arp table\n");
+ goto cleanup;
+ }
- sleep(interval);
- for (key = 0; key < nr_keys; key++) {
- __u64 sum = 0;
-
- assert(bpf_map_lookup_elem(rxcnt_map_fd,
- &key, values) == 0);
- for (i = 0; i < nr_cpus; i++)
- sum += (values[i] - prev[key][i]);
- if (sum)
- printf("proto %u: %10llu pkt/s\n",
- key, sum / interval);
- memcpy(prev[key], values, sizeof(values));
- }
+ if (get_route_table(AF_INET) < 0) {
+ fprintf(stderr, "Failed reading route table\n");
+ goto cleanup;
+ }
+ while (!routes_thread_exit) {
memset(buf, 0, sizeof(buf));
if (poll(&fds_route, 1, 3) == POLL_IN) {
nll = recv_msg(lr, sock);
if (nll < 0) {
- printf("recv from netlink: %s\n", strerror(nll));
- ret = -1;
+ fprintf(stderr, "recv from netlink: %s\n",
+ strerror(nll));
goto cleanup;
}
nh = (struct nlmsghdr *)buf;
- printf("Routing table updated.\n");
read_route(nh, nll);
}
+
memset(buf, 0, sizeof(buf));
if (poll(&fds_arp, 1, 3) == POLL_IN) {
nll = recv_msg(la, sock_arp);
if (nll < 0) {
- printf("recv from netlink: %s\n", strerror(nll));
- ret = -1;
+ fprintf(stderr, "recv from netlink: %s\n",
+ strerror(nll));
goto cleanup;
}
@@ -622,132 +531,169 @@ static int monitor_route(void)
read_arp(nh, nll);
}
- } while (1);
+ sleep(interval);
+ }
+
cleanup:
+ close(sock_arp);
close(sock);
- return ret;
+ return NULL;
}
-static void usage(const char *prog)
+static void usage(char *argv[], const struct option *long_options,
+ const char *doc, int mask, bool error,
+ struct bpf_object *obj)
{
- fprintf(stderr,
- "%s: %s [OPTS] interface name list\n\n"
- "OPTS:\n"
- " -S use skb-mode\n"
- " -F force loading prog\n",
- __func__, prog);
+ sample_usage(argv, long_options, doc, mask, error);
}
-int main(int ac, char **argv)
+int main(int argc, char **argv)
{
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- const char *optstr = "SF";
- struct bpf_program *prog;
- struct bpf_object *obj;
- char filename[256];
- char **ifname_list;
- int prog_fd, opt;
- int err, i = 1;
-
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-
- total_ifindex = ac - 1;
- ifname_list = (argv + 1);
-
- while ((opt = getopt(ac, argv, optstr)) != -1) {
+ bool error = true, generic = false, force = false;
+ int opt, ret = EXIT_FAIL_BPF;
+ struct xdp_router_ipv4 *skel;
+ int i, total_ifindex = argc - 1;
+ char **ifname_list = argv + 1;
+ pthread_t routes_thread;
+ int longindex = 0;
+
+ if (libbpf_set_strict_mode(LIBBPF_STRICT_ALL) < 0) {
+ fprintf(stderr, "Failed to set libbpf strict mode: %s\n",
+ strerror(errno));
+ goto end;
+ }
+
+ skel = xdp_router_ipv4__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to xdp_router_ipv4__open: %s\n",
+ strerror(errno));
+ goto end;
+ }
+
+ ret = sample_init_pre_load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sample_init_pre_load: %s\n",
+ strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = xdp_router_ipv4__load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to xdp_router_ipv4__load: %s\n",
+ strerror(errno));
+ goto end_destroy;
+ }
+
+ ret = sample_init(skel, mask);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+
+ while ((opt = getopt_long(argc, argv, "si:SFvh",
+ long_options, &longindex)) != -1) {
switch (opt) {
+ case 's':
+ mask |= SAMPLE_REDIRECT_MAP_CNT;
+ total_ifindex--;
+ ifname_list++;
+ break;
+ case 'i':
+ interval = strtoul(optarg, NULL, 0);
+ total_ifindex -= 2;
+ ifname_list += 2;
+ break;
case 'S':
- flags |= XDP_FLAGS_SKB_MODE;
+ generic = true;
total_ifindex--;
ifname_list++;
break;
case 'F':
- flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ force = true;
total_ifindex--;
ifname_list++;
break;
+ case 'v':
+ sample_switch_mode();
+ total_ifindex--;
+ ifname_list++;
+ break;
+ case 'h':
+ error = false;
default:
- usage(basename(argv[0]));
- return 1;
+ usage(argv, long_options, __doc__, mask, error, skel->obj);
+ goto end_destroy;
}
}
- if (!(flags & XDP_FLAGS_SKB_MODE))
- flags |= XDP_FLAGS_DRV_MODE;
-
- if (optind == ac) {
- usage(basename(argv[0]));
- return 1;
+ ret = EXIT_FAIL_OPTION;
+ if (optind == argc) {
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ goto end_destroy;
}
- obj = bpf_object__open_file(filename, NULL);
- if (libbpf_get_error(obj))
- return 1;
-
- prog = bpf_object__next_program(obj, NULL);
- bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
-
- printf("\n******************loading bpf file*********************\n");
- err = bpf_object__load(obj);
- if (err) {
- printf("bpf_object__load(): %s\n", strerror(errno));
- return 1;
+ lpm_map_fd = bpf_map__fd(skel->maps.lpm_map);
+ if (lpm_map_fd < 0) {
+ fprintf(stderr, "Failed loading lpm_map %s\n",
+ strerror(-lpm_map_fd));
+ goto end_destroy;
}
- prog_fd = bpf_program__fd(prog);
-
- lpm_map_fd = bpf_object__find_map_fd_by_name(obj, "lpm_map");
- rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
- arp_table_map_fd = bpf_object__find_map_fd_by_name(obj, "arp_table");
- exact_match_map_fd = bpf_object__find_map_fd_by_name(obj,
- "exact_match");
- tx_port_map_fd = bpf_object__find_map_fd_by_name(obj, "tx_port");
- if (lpm_map_fd < 0 || rxcnt_map_fd < 0 || arp_table_map_fd < 0 ||
- exact_match_map_fd < 0 || tx_port_map_fd < 0) {
- printf("bpf_object__find_map_fd_by_name failed\n");
- return 1;
+ arp_table_map_fd = bpf_map__fd(skel->maps.arp_table);
+ if (arp_table_map_fd < 0) {
+ fprintf(stderr, "Failed loading arp_table_map_fd %s\n",
+ strerror(-arp_table_map_fd));
+ goto end_destroy;
}
-
- ifindex_list = (int *)calloc(total_ifindex, sizeof(int *));
- for (i = 0; i < total_ifindex; i++) {
- ifindex_list[i] = if_nametoindex(ifname_list[i]);
- if (!ifindex_list[i]) {
- printf("Couldn't translate interface name: %s",
- strerror(errno));
- return 1;
- }
+ exact_match_map_fd = bpf_map__fd(skel->maps.exact_match);
+ if (exact_match_map_fd < 0) {
+ fprintf(stderr, "Failed loading exact_match_map_fd %s\n",
+ strerror(-exact_match_map_fd));
+ goto end_destroy;
+ }
+ tx_port_map_fd = bpf_map__fd(skel->maps.tx_port);
+ if (tx_port_map_fd < 0) {
+ fprintf(stderr, "Failed loading tx_port_map_fd %s\n",
+ strerror(-tx_port_map_fd));
+ goto end_destroy;
}
- prog_id_list = (__u32 *)calloc(total_ifindex, sizeof(__u32 *));
- for (i = 0; i < total_ifindex; i++) {
- if (bpf_xdp_attach(ifindex_list[i], prog_fd, flags, NULL) < 0) {
- printf("link set xdp fd failed\n");
- int recovery_index = i;
- for (i = 0; i < recovery_index; i++)
- bpf_xdp_detach(ifindex_list[i], flags, NULL);
+ ret = EXIT_FAIL_XDP;
+ for (i = 0; i < total_ifindex; i++) {
+ int index = if_nametoindex(ifname_list[i]);
- return 1;
+ if (!index) {
+			fprintf(stderr, "Interface %s not found: %s\n",
+				ifname_list[i], strerror(errno));
+ goto end_destroy;
}
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (err) {
- printf("can't get prog info - %s\n", strerror(errno));
- return err;
- }
- prog_id_list[i] = info.id;
- memset(&info, 0, sizeof(info));
- printf("Attached to %d\n", ifindex_list[i]);
+ if (sample_install_xdp(skel->progs.xdp_router_ipv4_prog,
+ index, generic, force) < 0)
+ goto end_destroy;
}
- signal(SIGINT, int_exit);
- signal(SIGTERM, int_exit);
- printf("\n*******************ROUTE TABLE*************************\n");
- get_route_table(AF_INET);
- printf("\n*******************ARP TABLE***************************\n");
- get_arp_table(AF_INET);
- if (monitor_route() < 0) {
- printf("Error in receiving route update");
- return 1;
+ ret = pthread_create(&routes_thread, NULL, monitor_routes_thread, NULL);
+ if (ret) {
+		fprintf(stderr, "Failed creating routes_thread: %s\n", strerror(ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
}
- return 0;
+ ret = sample_run(interval, NULL, NULL);
+ routes_thread_exit = true;
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_thread_wait;
+ }
+ ret = EXIT_OK;
+
+end_thread_wait:
+ pthread_join(routes_thread, NULL);
+end_destroy:
+ xdp_router_ipv4__destroy(skel);
+end:
+ sample_exit(ret);
}
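
After the conversion, loading and attaching go through the generated skeleton and the shared xdp_sample helpers: route and ARP tracking lives in monitor_routes_thread() while sample_run() reports statistics from the main thread. A possible invocation of the rebuilt sample (binary path and interface names are assumptions, not taken from this patch):

	sudo ./xdp_router_ipv4 -i 10 -S eth0 eth1

where -i sets the reporting interval, -S attaches in generic (skb) XDP mode, -F replaces an already attached XDP program, -v switches the sample's output mode (sample_switch_mode()) and -s enables redirect map counters (SAMPLE_REDIRECT_MAP_CNT).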
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
index f2d90cba5164..05a24a712d7d 100644
--- a/samples/bpf/xdp_rxq_info_user.c
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -14,7 +14,6 @@ static const char *__doc__ = " XDP RX-queue info extract example\n\n"
#include <string.h>
#include <unistd.h>
#include <locale.h>
-#include <sys/resource.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
index 0a2b3e997aed..7df7163239ac 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -12,7 +12,6 @@
#include <signal.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
-#include <sys/resource.h>
#include <libgen.h>
#include <linux/if_link.h>
diff --git a/samples/bpf/xdp_sample_user.c b/samples/bpf/xdp_sample_user.c
index c4332d068b91..158682852162 100644
--- a/samples/bpf/xdp_sample_user.c
+++ b/samples/bpf/xdp_sample_user.c
@@ -25,7 +25,6 @@
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
-#include <sys/resource.h>
#include <sys/signalfd.h>
#include <sys/sysinfo.h>
#include <sys/timerfd.h>
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c
index 2e811e4331cc..307baef6861a 100644
--- a/samples/bpf/xdp_tx_iptunnel_user.c
+++ b/samples/bpf/xdp_tx_iptunnel_user.c
@@ -10,7 +10,6 @@
#include <stdlib.h>
#include <string.h>
#include <net/if.h>
-#include <sys/resource.h>
#include <arpa/inet.h>
#include <netinet/ether.h>
#include <unistd.h>
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 6f3fe30ad283..be7d2572e3e6 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -25,7 +25,6 @@
#include <string.h>
#include <sys/capability.h>
#include <sys/mman.h>
-#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
@@ -1886,7 +1885,6 @@ int main(int argc, char **argv)
{
struct __user_cap_header_struct hdr = { _LINUX_CAPABILITY_VERSION_3, 0 };
struct __user_cap_data_struct data[2] = { { 0 } };
- struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
bool rx = false, tx = false;
struct sched_param schparam;
struct xsk_umem_info *umem;
@@ -1917,11 +1915,8 @@ int main(int argc, char **argv)
data[1].effective, data[1].inheritable, data[1].permitted);
}
} else {
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
- fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
- strerror(errno));
- exit(EXIT_FAILURE);
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
if (opt_num_xsks > 1)
load_xdp_program(argv, &obj);
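
The setrlimit(RLIMIT_MEMLOCK) boilerplate removed here (and in the other samples and tools below) is no longer needed: LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK, which is part of LIBBPF_STRICT_ALL, makes libbpf raise the memlock limit itself on kernels that lack memcg-based memory accounting. A minimal sketch of the new pattern:

#include <bpf/libbpf.h>

int main(void)
{
	/* Opt in to libbpf 1.0 behavior; libbpf now bumps RLIMIT_MEMLOCK on
	 * its own when the kernel does not account BPF memory via memcg, so
	 * no explicit setrlimit() call is required.
	 */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	/* ... open/load BPF objects or skeletons as before ... */
	return 0;
}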
diff --git a/samples/bpf/xsk_fwd.c b/samples/bpf/xsk_fwd.c
index 2220509588a0..2324e18ccc7e 100644
--- a/samples/bpf/xsk_fwd.c
+++ b/samples/bpf/xsk_fwd.c
@@ -10,7 +10,6 @@
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
-#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
@@ -131,7 +130,6 @@ static struct bpool *
bpool_init(struct bpool_params *params,
struct xsk_umem_config *umem_cfg)
{
- struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
u64 n_slabs, n_slabs_reserved, n_buffers, n_buffers_reserved;
u64 slabs_size, slabs_reserved_size;
u64 buffers_size, buffers_reserved_size;
@@ -140,9 +138,8 @@ bpool_init(struct bpool_params *params,
u8 *p;
int status;
- /* mmap prep. */
- if (setrlimit(RLIMIT_MEMLOCK, &r))
- return NULL;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
/* bpool internals dimensioning. */
n_slabs = (params->n_buffers + params->n_buffers_per_slab - 1) /
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 0c1e06cf50b9..c740142c24d8 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -17,7 +17,6 @@
#include <linux/magic.h>
#include <net/if.h>
#include <sys/mount.h>
-#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>
@@ -119,13 +118,6 @@ static bool is_bpffs(char *path)
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}
-void set_max_rlimit(void)
-{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
-}
-
static int
mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
{
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 290998c82de1..be130e35462f 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -567,7 +567,7 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
res = probe_prog_type_ifindex(prog_type, ifindex);
} else {
- res = libbpf_probe_bpf_prog_type(prog_type, NULL);
+ res = libbpf_probe_bpf_prog_type(prog_type, NULL) > 0;
}
#ifdef USE_LIBCAP
@@ -1136,8 +1136,6 @@ static int do_probe(int argc, char **argv)
__u32 ifindex = 0;
char *ifname;
- set_max_rlimit();
-
while (argc) {
if (is_prefix(*argv, "kernel")) {
if (target != COMPONENT_UNSPEC) {
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 97dec81950e5..8fb0116f9136 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -20,6 +20,9 @@ static const char * const link_type_name[] = {
[BPF_LINK_TYPE_CGROUP] = "cgroup",
[BPF_LINK_TYPE_ITER] = "iter",
[BPF_LINK_TYPE_NETNS] = "netns",
+ [BPF_LINK_TYPE_XDP] = "xdp",
+ [BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
+ [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
};
static struct hashmap *link_table;
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index e81227761f5d..9062ef2b8767 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -507,9 +507,9 @@ int main(int argc, char **argv)
* It will still be rejected if users use LIBBPF_STRICT_ALL
* mode for loading generated skeleton.
*/
- ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
- if (ret)
- p_err("failed to enable libbpf strict mode: %d", ret);
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
+ } else {
+ libbpf_set_strict_mode(LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK);
}
argc -= optind;
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 6e9277ffc68c..aa99ffab451a 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -102,8 +102,6 @@ int detect_common_prefix(const char *arg, ...);
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
void usage(void) __noreturn;
-void set_max_rlimit(void);
-
int mount_tracefs(const char *target);
struct obj_ref {
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index c26378f20831..877387ef79c7 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1342,8 +1342,6 @@ static int do_create(int argc, char **argv)
goto exit;
}
- set_max_rlimit();
-
fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr);
if (fd < 0) {
p_err("map create failed: %s", strerror(errno));
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index 50de087b0db7..226ec2c39052 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -11,7 +11,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <ftw.h>
+#include <dirent.h>
#include <bpf/bpf.h>
@@ -147,81 +147,83 @@ static void print_perf_plain(int pid, int fd, __u32 prog_id, __u32 fd_type,
}
}
-static int show_proc(const char *fpath, const struct stat *sb,
- int tflag, struct FTW *ftwbuf)
+static int show_proc(void)
{
+ struct dirent *proc_de, *pid_fd_de;
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
- int err, pid = 0, fd = 0;
+ DIR *proc, *pid_fd;
+ int err, pid, fd;
const char *pch;
char buf[4096];
- /* prefix always /proc */
- pch = fpath + 5;
- if (*pch == '\0')
- return 0;
+ proc = opendir("/proc");
+ if (!proc)
+ return -1;
- /* pid should be all numbers */
- pch++;
- while (isdigit(*pch)) {
- pid = pid * 10 + *pch - '0';
- pch++;
- }
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd directory */
- pch++;
- if (strncmp(pch, "fd", 2))
- return FTW_SKIP_SUBTREE;
- pch += 2;
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd/<fd_num> */
- pch++;
- while (isdigit(*pch)) {
- fd = fd * 10 + *pch - '0';
- pch++;
- }
- if (*pch != '\0')
- return FTW_SKIP_SUBTREE;
+ while ((proc_de = readdir(proc))) {
+ pid = 0;
+ pch = proc_de->d_name;
- /* query (pid, fd) for potential perf events */
- len = sizeof(buf);
- err = bpf_task_fd_query(pid, fd, 0, buf, &len, &prog_id, &fd_type,
- &probe_offset, &probe_addr);
- if (err < 0)
- return 0;
+ /* pid should be all numbers */
+ while (isdigit(*pch)) {
+ pid = pid * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
- if (json_output)
- print_perf_json(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
- else
- print_perf_plain(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
+ err = snprintf(buf, sizeof(buf), "/proc/%s/fd", proc_de->d_name);
+ if (err < 0 || err >= (int)sizeof(buf))
+ continue;
+
+ pid_fd = opendir(buf);
+ if (!pid_fd)
+ continue;
+ while ((pid_fd_de = readdir(pid_fd))) {
+ fd = 0;
+ pch = pid_fd_de->d_name;
+
+ /* fd should be all numbers */
+ while (isdigit(*pch)) {
+ fd = fd * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
+
+ /* query (pid, fd) for potential perf events */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(pid, fd, 0, buf, &len,
+ &prog_id, &fd_type,
+ &probe_offset, &probe_addr);
+ if (err < 0)
+ continue;
+
+ if (json_output)
+ print_perf_json(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ else
+ print_perf_plain(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ }
+ closedir(pid_fd);
+ }
+ closedir(proc);
return 0;
}
static int do_show(int argc, char **argv)
{
- int flags = FTW_ACTIONRETVAL | FTW_PHYS;
- int err = 0, nopenfd = 16;
+ int err;
if (!has_perf_query_support())
return -1;
if (json_output)
jsonw_start_array(json_wtr);
- if (nftw("/proc", show_proc, nopenfd, flags) == -1) {
- p_err("%s", strerror(errno));
- err = -1;
- }
+ err = show_proc();
if (json_output)
jsonw_end_array(json_wtr);
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
index bb6c969a114a..e2d00d3cd868 100644
--- a/tools/bpf/bpftool/pids.c
+++ b/tools/bpf/bpftool/pids.c
@@ -108,7 +108,6 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
p_err("failed to create hashmap for PID references");
return -1;
}
- set_max_rlimit();
skel = pid_iter_bpf__open();
if (!skel) {
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index bc4e05542c2b..5c2c63df92e8 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -68,6 +68,7 @@ const char * const prog_type_name[] = {
[BPF_PROG_TYPE_EXT] = "ext",
[BPF_PROG_TYPE_LSM] = "lsm",
[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
+ [BPF_PROG_TYPE_SYSCALL] = "syscall",
};
const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
@@ -1603,8 +1604,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
}
}
- set_max_rlimit();
-
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
open_opts.kernel_log_level = 1 + 2 + 4;
@@ -2302,7 +2301,6 @@ static int do_profile(int argc, char **argv)
}
}
- set_max_rlimit();
err = profiler_bpf__load(profile_obj);
if (err) {
p_err("failed to load profile_obj");
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index e08a6ff2866c..2535f079ed67 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -501,8 +501,6 @@ static int do_register(int argc, char **argv)
if (libbpf_get_error(obj))
return -1;
- set_max_rlimit();
-
if (bpf_object__load(obj)) {
bpf_object__close(obj);
return -1;
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
index e80a5c79b38f..bf1f02212797 100644
--- a/tools/bpf/bpftool/tracelog.c
+++ b/tools/bpf/bpftool/tracelog.c
@@ -9,7 +9,7 @@
#include <string.h>
#include <unistd.h>
#include <linux/magic.h>
-#include <sys/fcntl.h>
+#include <fcntl.h>
#include <sys/vfs.h>
#include "main.h"
diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c
index d78f4148597f..83c5993a139a 100644
--- a/tools/bpf/runqslower/runqslower.c
+++ b/tools/bpf/runqslower/runqslower.c
@@ -4,7 +4,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/resource.h>
#include <time.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
@@ -88,16 +87,6 @@ int libbpf_print_fn(enum libbpf_print_level level,
return vfprintf(stderr, format, args);
}
-static int bump_memlock_rlimit(void)
-{
- struct rlimit rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- return setrlimit(RLIMIT_MEMLOCK, &rlim_new);
-}
-
void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
const struct runq_event *e = data;
@@ -133,11 +122,8 @@ int main(int argc, char **argv)
libbpf_set_print(libbpf_print_fn);
- err = bump_memlock_rlimit();
- if (err) {
- fprintf(stderr, "failed to increase rlimit: %d", err);
- return 1;
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
obj = runqslower_bpf__open();
if (!obj) {
diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h
index 77f7c1638eb1..8756df13be50 100644
--- a/tools/include/uapi/asm-generic/socket.h
+++ b/tools/include/uapi/asm-generic/socket.h
@@ -119,6 +119,8 @@
#define SO_DETACH_REUSEPORT_BPF 68
+#define SO_RCVMARK 75
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
index 39acc149d843..d7dfeab0d71a 100644
--- a/tools/include/uapi/asm/bpf_perf_event.h
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -1,5 +1,7 @@
#if defined(__aarch64__)
#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__arc__)
+#include "../../arch/arc/include/uapi/asm/bpf_perf_event.h"
#elif defined(__s390__)
#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
#elif defined(__riscv)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index d14b10b85e51..444fe6f1cf35 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5143,6 +5143,17 @@ union bpf_attr {
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ *		The old value of kptr (which can be NULL). The returned pointer,
+ * if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5339,6 +5350,7 @@ union bpf_attr {
FN(copy_from_user_task), \
FN(skb_set_tstamp), \
FN(ima_file_hash), \
+ FN(kptr_xchg), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index b0d8fea1951d..a9162a6c0284 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -33,8 +33,8 @@ struct btf_type {
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
- * bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-30: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
* bit 31: kind_flag, currently used by
* struct, union and fwd
*/
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 94f0a146bb7b..31a1a9015902 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,3 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
- btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o
+ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
+ usdt.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 064c89e31560..64741c55b8e3 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -239,7 +239,7 @@ install_lib: all_cmd
SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \
- skel_internal.h libbpf_version.h
+ skel_internal.h libbpf_version.h usdt.bpf.h
GEN_HDRS := $(BPF_GENERATED)
INSTALL_PFX := $(DESTDIR)$(prefix)/include/bpf
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index cf27251adb92..a9d292c106c2 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -817,7 +817,7 @@ int bpf_link_create(int prog_fd, int target_fd,
{
__u32 target_btf_id, iter_info_len;
union bpf_attr attr;
- int fd;
+ int fd, err;
if (!OPTS_VALID(opts, bpf_link_create_opts))
return libbpf_err(-EINVAL);
@@ -870,7 +870,37 @@ int bpf_link_create(int prog_fd, int target_fd,
}
proceed:
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
- return libbpf_err_errno(fd);
+ if (fd >= 0)
+ return fd;
+ /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
+ * and other similar programs
+ */
+ err = -errno;
+ if (err != -EINVAL)
+ return libbpf_err(err);
+
+ /* if user used features not supported by
+ * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
+ */
+ if (attr.link_create.target_fd || attr.link_create.target_btf_id)
+ return libbpf_err(err);
+ if (!OPTS_ZEROED(opts, sz))
+ return libbpf_err(err);
+
+ /* otherwise, for few select kinds of programs that can be
+ * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
+ * a fallback for older kernels
+ */
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ case BPF_LSM_MAC:
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ return bpf_raw_tracepoint_open(NULL, prog_fd);
+ default:
+ return libbpf_err(err);
+ }
}
int bpf_link_detach(int link_fd)
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 44df982d2a5c..5de3eb267125 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -149,6 +149,13 @@ enum libbpf_tristate {
#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
+#if __has_attribute(btf_type_tag)
+#define __kptr __attribute__((btf_type_tag("kptr")))
+#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))
+#else
+#define __kptr
+#define __kptr_ref
+#endif
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
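
These tags pair with the bpf_kptr_xchg() helper documented earlier in this patch: a map value field annotated __kptr_ref holds a referenced kernel pointer that is stored and retrieved through the helper. A minimal BPF-side sketch, assuming a hypothetical kernel type struct foo with acquire/release kfuncs foo_acquire()/foo_release(); only the tags and the helper come from this series:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct foo;					/* hypothetical kernel type */
extern struct foo *foo_acquire(void) __ksym;	/* hypothetical acquire kfunc */
extern void foo_release(struct foo *p) __ksym;	/* hypothetical release kfunc */

struct map_value {
	struct foo __kptr_ref *ptr;		/* referenced kernel pointer */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} kptr_map SEC(".maps");

SEC("tc")
int store_kptr(struct __sk_buff *skb)
{
	struct foo *old, *new;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&kptr_map, &key);
	if (!v)
		return 0;

	new = foo_acquire();
	if (!new)
		return 0;

	/* ownership of 'new' moves into the map; the previous value comes back */
	old = bpf_kptr_xchg(&v->ptr, new);
	if (old)
		foo_release(old);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";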
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index e3a8c947e89f..01ce121c302d 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -27,6 +27,9 @@
#elif defined(__TARGET_ARCH_riscv)
#define bpf_target_riscv
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_arc)
+ #define bpf_target_arc
+ #define bpf_target_defined
#else
/* Fall back to what the compiler says */
@@ -54,6 +57,9 @@
#elif defined(__riscv) && __riscv_xlen == 64
#define bpf_target_riscv
#define bpf_target_defined
+#elif defined(__arc__)
+ #define bpf_target_arc
+ #define bpf_target_defined
#endif /* no compiler target */
#endif
@@ -233,6 +239,23 @@ struct pt_regs___arm64 {
/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#elif defined(bpf_target_arc)
+
+/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
+#define __PT_PARM1_REG scratch.r0
+#define __PT_PARM2_REG scratch.r1
+#define __PT_PARM3_REG scratch.r2
+#define __PT_PARM4_REG scratch.r3
+#define __PT_PARM5_REG scratch.r4
+#define __PT_RET_REG scratch.blink
+#define __PT_FP_REG __unsupported__
+#define __PT_RC_REG scratch.r0
+#define __PT_SP_REG scratch.sp
+#define __PT_IP_REG scratch.ret
+/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+
#endif
#if defined(bpf_target_defined)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 1383e26c5d1f..bb1e06eb1eca 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -2626,6 +2626,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
+ size_t sec_cnt = 0;
/* The start of the info sec (including the __u32 record_size). */
void *info;
@@ -2689,8 +2690,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return -EINVAL;
}
- total_record_size = sec_hdrlen +
- (__u64)num_records * record_size;
+ total_record_size = sec_hdrlen + (__u64)num_records * record_size;
if (info_left < total_record_size) {
pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
@@ -2699,12 +2699,14 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
info_left -= total_record_size;
sinfo = (void *)sinfo + total_record_size;
+ sec_cnt++;
}
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
ext_info->info = info + sizeof(__u32);
+ ext_info->sec_cnt = sec_cnt;
return 0;
}
@@ -2788,6 +2790,9 @@ void btf_ext__free(struct btf_ext *btf_ext)
{
if (IS_ERR_OR_NULL(btf_ext))
return;
+ free(btf_ext->func_info.sec_idxs);
+ free(btf_ext->line_info.sec_idxs);
+ free(btf_ext->core_relo_info.sec_idxs);
free(btf_ext->data);
free(btf_ext);
}
@@ -2826,10 +2831,8 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
if (err)
goto done;
- if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) {
- err = -EINVAL;
- goto done;
- }
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+ goto done; /* skip core relos parsing */
err = btf_ext_setup_core_relos(btf_ext);
if (err)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 809fe209cdcc..73a5192defb3 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -302,7 +302,7 @@ struct bpf_program {
void *priv;
bpf_program_clear_priv_t clear_priv;
- bool load;
+ bool autoload;
bool mark_btf_static;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
@@ -483,6 +483,8 @@ struct elf_state {
int st_ops_shndx;
};
+struct usdt_manager;
+
struct bpf_object {
char name[BPF_OBJ_NAME_LEN];
char license[64];
@@ -545,6 +547,8 @@ struct bpf_object {
size_t fd_array_cap;
size_t fd_array_cnt;
+ struct usdt_manager *usdt_man;
+
char path[];
};
@@ -668,7 +672,18 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
prog->insns_cnt = prog->sec_insn_cnt;
prog->type = BPF_PROG_TYPE_UNSPEC;
- prog->load = true;
+
+ /* libbpf's convention for SEC("?abc...") is that it's just like
+ * SEC("abc...") but the corresponding bpf_program starts out with
+ * autoload set to false.
+ */
+ if (sec_name[0] == '?') {
+ prog->autoload = false;
+ /* from now on forget there was ? in section name */
+ sec_name++;
+ } else {
+ prog->autoload = true;
+ }
prog->instances.fds = NULL;
prog->instances.nr = -1;
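
In practice this gives a declarative way to ship programs that are compiled into the object but not loaded unless the caller opts in. A small user-space sketch (the my_obj skeleton, prog_b program and need_prog_b condition are hypothetical):

	struct my_obj *skel;
	int err;

	skel = my_obj__open();
	if (!skel)
		return -errno;

	/* prog_b was declared with SEC("?xdp"), so it starts with autoload == false */
	if (need_prog_b)
		bpf_program__set_autoload(skel->progs.prog_b, true);

	err = my_obj__load(skel);	/* prog_b is loaded only if enabled above */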
@@ -1218,10 +1233,8 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
if (!obj->efile.elf)
return;
- if (obj->efile.elf) {
- elf_end(obj->efile.elf);
- obj->efile.elf = NULL;
- }
+ elf_end(obj->efile.elf);
+ obj->efile.elf = NULL;
obj->efile.symbols = NULL;
obj->efile.st_ops_data = NULL;
@@ -1397,8 +1410,11 @@ static int find_elf_var_offset(const struct bpf_object *obj, const char *name, _
for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
Elf64_Sym *sym = elf_sym_by_idx(obj, si);
- if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL ||
- ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
+ if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
+ continue;
+
+ if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
+ ELF64_ST_BIND(sym->st_info) != STB_WEAK)
continue;
sname = elf_sym_str(obj, sym->st_name);
@@ -2749,6 +2765,9 @@ static int bpf_object__init_btf(struct bpf_object *obj,
btf__set_pointer_size(obj->btf, 8);
}
if (btf_ext_data) {
+ struct btf_ext_info *ext_segs[3];
+ int seg_num, sec_num;
+
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
@@ -2762,6 +2781,43 @@ static int bpf_object__init_btf(struct bpf_object *obj,
obj->btf_ext = NULL;
goto out;
}
+
+ /* setup .BTF.ext to ELF section mapping */
+ ext_segs[0] = &obj->btf_ext->func_info;
+ ext_segs[1] = &obj->btf_ext->line_info;
+ ext_segs[2] = &obj->btf_ext->core_relo_info;
+ for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
+ struct btf_ext_info *seg = ext_segs[seg_num];
+ const struct btf_ext_info_sec *sec;
+ const char *sec_name;
+ Elf_Scn *scn;
+
+ if (seg->sec_cnt == 0)
+ continue;
+
+ seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
+ if (!seg->sec_idxs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sec_num = 0;
+ for_each_btf_ext_sec(seg, sec) {
+ /* preventively increment index to avoid doing
+ * this before every continue below
+ */
+ sec_num++;
+
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (str_is_empty(sec_name))
+ continue;
+ scn = elf_sec_by_name(obj, sec_name);
+ if (!scn)
+ continue;
+
+ seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
+ }
+ }
}
out:
if (err && libbpf_needs_btf(obj)) {
@@ -2920,7 +2976,7 @@ static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
}
bpf_object__for_each_program(prog, obj) {
- if (!prog->load)
+ if (!prog->autoload)
continue;
if (prog_needs_vmlinux_btf(prog))
return true;
@@ -4587,7 +4643,7 @@ static int probe_kern_probe_read_kernel(void)
};
int fd, insn_cnt = ARRAY_SIZE(insns);
- fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
+ fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
return probe_fd(fd);
}
@@ -4678,6 +4734,18 @@ static int probe_perf_link(void)
return link_fd < 0 && err == -EBADF;
}
+static int probe_kern_bpf_cookie(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
+ BPF_EXIT_INSN(),
+ };
+ int ret, insn_cnt = ARRAY_SIZE(insns);
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
+ return probe_fd(ret);
+}
+
enum kern_feature_result {
FEAT_UNKNOWN = 0,
FEAT_SUPPORTED = 1,
@@ -4740,6 +4808,9 @@ static struct kern_feature_desc {
[FEAT_MEMCG_ACCOUNT] = {
"memcg-based memory accounting", probe_memcg_account,
},
+ [FEAT_BPF_COOKIE] = {
+ "BPF cookie support", probe_kern_bpf_cookie,
+ },
};
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
@@ -5555,6 +5626,22 @@ static int record_relo_core(struct bpf_program *prog,
return 0;
}
+static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
+{
+ struct reloc_desc *relo;
+ int i;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+ relo = &prog->reloc_desc[i];
+ if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
+ continue;
+
+ return relo->core_relo;
+ }
+
+ return NULL;
+}
+
static int bpf_core_resolve_relo(struct bpf_program *prog,
const struct bpf_core_relo *relo,
int relo_idx,
@@ -5611,7 +5698,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
struct bpf_program *prog;
struct bpf_insn *insn;
const char *sec_name;
- int i, err = 0, insn_idx, sec_idx;
+ int i, err = 0, insn_idx, sec_idx, sec_num;
if (obj->btf_ext->core_relo_info.len == 0)
return 0;
@@ -5632,32 +5719,18 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
}
seg = &obj->btf_ext->core_relo_info;
+ sec_num = 0;
for_each_btf_ext_sec(seg, sec) {
+ sec_idx = seg->sec_idxs[sec_num];
+ sec_num++;
+
sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
if (str_is_empty(sec_name)) {
err = -EINVAL;
goto out;
}
- /* bpf_object's ELF is gone by now so it's not easy to find
- * section index by section name, but we can find *any*
- * bpf_program within desired section name and use it's
- * prog->sec_idx to do a proper search by section index and
- * instruction offset
- */
- prog = NULL;
- for (i = 0; i < obj->nr_programs; i++) {
- prog = &obj->programs[i];
- if (strcmp(prog->sec_name, sec_name) == 0)
- break;
- }
- if (!prog) {
- pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
- return -ENOENT;
- }
- sec_idx = prog->sec_idx;
- pr_debug("sec '%s': found %d CO-RE relocations\n",
- sec_name, sec->num_info);
+ pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
if (rec->insn_off % BPF_INSN_SZ)
@@ -5665,15 +5738,22 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
insn_idx = rec->insn_off / BPF_INSN_SZ;
prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
if (!prog) {
- pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
- sec_name, insn_idx, i);
- err = -EINVAL;
- goto out;
+ /* When __weak subprog is "overridden" by another instance
+ * of the subprog from a different object file, linker still
+ * appends all the .BTF.ext info that used to belong to that
+ * eliminated subprogram.
+ * This is similar to what x86-64 linker does for relocations.
+ * So just ignore such relocations just like we ignore
+ * subprog instructions when discovering subprograms.
+ */
+ pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
+ sec_name, i, insn_idx);
+ continue;
}
/* no need to apply CO-RE relocation if the program is
* not going to be loaded
*/
- if (!prog->load)
+ if (!prog->autoload)
continue;
/* adjust insn_idx from section frame of reference to the local
@@ -5685,16 +5765,16 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
return -EINVAL;
insn = &prog->insns[insn_idx];
- if (prog->obj->gen_loader) {
- err = record_relo_core(prog, rec, insn_idx);
- if (err) {
- pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
- prog->name, i, err);
- goto out;
- }
- continue;
+ err = record_relo_core(prog, rec, insn_idx);
+ if (err) {
+ pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
+ prog->name, i, err);
+ goto out;
}
+ if (prog->obj->gen_loader)
+ continue;
+
err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
@@ -5834,14 +5914,13 @@ static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
void *rec, *rec_end, *new_prog_info;
const struct btf_ext_info_sec *sec;
size_t old_sz, new_sz;
- const char *sec_name;
- int i, off_adj;
+ int i, sec_num, sec_idx, off_adj;
+ sec_num = 0;
for_each_btf_ext_sec(ext_info, sec) {
- sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
- if (!sec_name)
- return -EINVAL;
- if (strcmp(sec_name, prog->sec_name) != 0)
+ sec_idx = ext_info->sec_idxs[sec_num];
+ sec_num++;
+ if (prog->sec_idx != sec_idx)
continue;
for_each_btf_ext_rec(ext_info, sec, i, rec) {
@@ -6236,7 +6315,6 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
if (err)
return err;
-
return 0;
}
@@ -6297,8 +6375,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
err);
return err;
}
- if (obj->gen_loader)
- bpf_object__sort_relos(obj);
+ bpf_object__sort_relos(obj);
}
/* Before relocating calls pre-process relocations and mark
@@ -6334,7 +6411,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
*/
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load)
+ if (!prog->autoload)
continue;
err = bpf_object__relocate_calls(obj, prog);
@@ -6349,7 +6426,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load)
+ if (!prog->autoload)
continue;
err = bpf_object__relocate_data(obj, prog);
if (err) {
@@ -6358,8 +6435,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- if (!obj->gen_loader)
- bpf_object__free_relocs(obj);
+
return 0;
}
@@ -6636,6 +6712,8 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
return 0;
}
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
+
static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
struct bpf_insn *insns, int insns_cnt,
const char *license, __u32 kern_version,
@@ -6782,6 +6860,10 @@ retry_load:
goto retry_load;
ret = -errno;
+
+ /* post-process verifier log to improve error descriptions */
+ fixup_verifier_log(prog, log_buf, log_buf_size);
+
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
pr_perm_msg(ret);
@@ -6790,10 +6872,6 @@ retry_load:
pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
prog->name, log_buf);
}
- if (insns_cnt >= BPF_MAXINSNS) {
- pr_warn("prog '%s': program too large (%d insns), at most %d insns\n",
- prog->name, insns_cnt, BPF_MAXINSNS);
- }
out:
if (own_log_buf)
@@ -6801,6 +6879,128 @@ out:
return ret;
}
+static char *find_prev_line(char *buf, char *cur)
+{
+ char *p;
+
+ if (cur == buf) /* end of a log buf */
+ return NULL;
+
+ p = cur - 1;
+ while (p - 1 >= buf && *(p - 1) != '\n')
+ p--;
+
+ return p;
+}
+
+static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
+ char *orig, size_t orig_sz, const char *patch)
+{
+ /* size of the remaining log content to the right from the to-be-replaced part */
+ size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
+ size_t patch_sz = strlen(patch);
+
+ if (patch_sz != orig_sz) {
+ /* If patch line(s) are longer than original piece of verifier log,
+ * shift log contents by (patch_sz - orig_sz) bytes to the right
+ * starting from after to-be-replaced part of the log.
+ *
+ * If patch line(s) are shorter than original piece of verifier log,
+ * shift log contents by (orig_sz - patch_sz) bytes to the left
+ * starting from after to-be-replaced part of the log
+ *
+ * We need to be careful about not overflowing available
+ * buf_sz capacity. If that's the case, we'll truncate the end
+ * of the original log, as necessary.
+ */
+ if (patch_sz > orig_sz) {
+ if (orig + patch_sz >= buf + buf_sz) {
+ /* patch is big enough to cover remaining space completely */
+ patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
+ rem_sz = 0;
+ } else if (patch_sz - orig_sz > buf_sz - log_sz) {
+ /* patch causes part of remaining log to be truncated */
+ rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
+ }
+ }
+ /* shift remaining log to the right by calculated amount */
+ memmove(orig + patch_sz, orig + orig_sz, rem_sz);
+ }
+
+ memcpy(orig, patch, patch_sz);
+}
+
+static void fixup_log_failed_core_relo(struct bpf_program *prog,
+ char *buf, size_t buf_sz, size_t log_sz,
+ char *line1, char *line2, char *line3)
+{
+ /* Expected log for failed and not properly guarded CO-RE relocation:
+ * line1 -> 123: (85) call unknown#195896080
+ * line2 -> invalid func unknown#195896080
+ * line3 -> <anything else or end of buffer>
+ *
+ * "123" is the index of the instruction that was poisoned. We extract
+ * instruction index to find corresponding CO-RE relocation and
+ * replace this part of the log with more relevant information about
+ * failed CO-RE relocation.
+ */
+ const struct bpf_core_relo *relo;
+ struct bpf_core_spec spec;
+ char patch[512], spec_buf[256];
+ int insn_idx, err;
+
+ if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
+ return;
+
+ relo = find_relo_core(prog, insn_idx);
+ if (!relo)
+ return;
+
+ err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
+ if (err)
+ return;
+
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
+ snprintf(patch, sizeof(patch),
+ "%d: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation %s\n",
+ insn_idx, spec_buf);
+
+ patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
+}
+
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
+{
+ /* look for familiar error patterns in last N lines of the log */
+ const size_t max_last_line_cnt = 10;
+ char *prev_line, *cur_line, *next_line;
+ size_t log_sz;
+ int i;
+
+ if (!buf)
+ return;
+
+ log_sz = strlen(buf) + 1;
+ next_line = buf + log_sz - 1;
+
+ for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
+ cur_line = find_prev_line(buf, next_line);
+ if (!cur_line)
+ return;
+
+ /* failed CO-RE relocation case */
+ if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
+ prev_line = find_prev_line(buf, cur_line);
+ if (!prev_line)
+ continue;
+
+ fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
+ prev_line, cur_line, next_line);
+ return;
+ }
+ }
+}
+
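
For reference, this is the kind of rewrite the fixup performs; the instruction index is the one from the comment above, and the formatted spec is a placeholder for whatever bpf_core_format_spec() produces:

	before:  123: (85) call unknown#195896080
	         invalid func unknown#195896080

	after:   123: <invalid CO-RE relocation>
	         failed to resolve CO-RE relocation <spec from bpf_core_format_spec()>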
static int bpf_program_record_relos(struct bpf_program *prog)
{
struct bpf_object *obj = prog->obj;
@@ -6946,7 +7146,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load) {
+ if (!prog->autoload) {
pr_debug("prog '%s': skipped loading\n", prog->name);
continue;
}
@@ -6955,8 +7155,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
if (err)
return err;
}
- if (obj->gen_loader)
- bpf_object__free_relocs(obj);
+
+ bpf_object__free_relocs(obj);
return 0;
}
@@ -6976,8 +7176,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
continue;
}
- bpf_program__set_type(prog, prog->sec_def->prog_type);
- bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
+ prog->type = prog->sec_def->prog_type;
+ prog->expected_attach_type = prog->sec_def->expected_attach_type;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
@@ -8200,6 +8400,9 @@ void bpf_object__close(struct bpf_object *obj)
if (obj->clear_priv)
obj->clear_priv(obj, obj->priv);
+ usdt_manager_free(obj->usdt_man);
+ obj->usdt_man = NULL;
+
bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
bpf_object_unload(obj);
@@ -8423,7 +8626,7 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
bool bpf_program__autoload(const struct bpf_program *prog)
{
- return prog->load;
+ return prog->autoload;
}
int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
@@ -8431,7 +8634,7 @@ int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
if (prog->obj->loaded)
return libbpf_err(-EINVAL);
- prog->load = autoload;
+ prog->autoload = autoload;
return 0;
}
@@ -8519,9 +8722,13 @@ enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
return prog->type;
}
-void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
+int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->type = type;
+ return 0;
}
static bool bpf_program__is_type(const struct bpf_program *prog,
@@ -8535,8 +8742,7 @@ int bpf_program__set_##NAME(struct bpf_program *prog) \
{ \
if (!prog) \
return libbpf_err(-EINVAL); \
- bpf_program__set_type(prog, TYPE); \
- return 0; \
+ return bpf_program__set_type(prog, TYPE); \
} \
\
bool bpf_program__is_##NAME(const struct bpf_program *prog) \
@@ -8566,10 +8772,14 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
return prog->expected_attach_type;
}
-void bpf_program__set_expected_attach_type(struct bpf_program *prog,
+int bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->expected_attach_type = type;
+ return 0;
}
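
Since both setters now report failure, callers that adjust the program type or attach type before load should check the result; a minimal sketch:

	err = bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
	if (!err)
		err = bpf_program__set_expected_attach_type(prog, BPF_XDP);
	if (err)	/* -EBUSY once bpf_object__load() has already run */
		return err;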
__u32 bpf_program__flags(const struct bpf_program *prog)
@@ -8630,6 +8840,8 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
}
static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
@@ -8642,11 +8854,12 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
- SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
+ SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
- SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
+ SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt),
SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
@@ -9636,9 +9849,8 @@ static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
* bpf_object__open guessed
*/
if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
- bpf_program__set_type(prog, attr->prog_type);
- bpf_program__set_expected_attach_type(prog,
- attach_type);
+ prog->type = attr->prog_type;
+ prog->expected_attach_type = attach_type;
}
if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) {
/*
@@ -9692,14 +9904,6 @@ int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
return bpf_prog_load_xattr2(&attr, pobj, prog_fd);
}
-struct bpf_link {
- int (*detach)(struct bpf_link *link);
- void (*dealloc)(struct bpf_link *link);
- char *pin_path; /* NULL, if not pinned */
- int fd; /* hook FD, -1 if not applicable */
- bool disconnected;
-};
-
/* Replace link's underlying BPF program with the new one */
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
@@ -10517,6 +10721,273 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
return pfd;
}
+/* uprobes deal in relative offsets; subtract the base address associated with
+ * the mapped binary. See Documentation/trace/uprobetracer.rst for more
+ * details.
+ */
+static long elf_find_relative_offset(const char *filename, Elf *elf, long addr)
+{
+ size_t n;
+ int i;
+
+ if (elf_getphdrnum(elf, &n)) {
+ pr_warn("elf: failed to find program headers for '%s': %s\n", filename,
+ elf_errmsg(-1));
+ return -ENOENT;
+ }
+
+ for (i = 0; i < n; i++) {
+ int seg_start, seg_end, seg_offset;
+ GElf_Phdr phdr;
+
+ if (!gelf_getphdr(elf, i, &phdr)) {
+ pr_warn("elf: failed to get program header %d from '%s': %s\n", i, filename,
+ elf_errmsg(-1));
+ return -ENOENT;
+ }
+ if (phdr.p_type != PT_LOAD || !(phdr.p_flags & PF_X))
+ continue;
+
+ seg_start = phdr.p_vaddr;
+ seg_end = seg_start + phdr.p_memsz;
+ seg_offset = phdr.p_offset;
+ if (addr >= seg_start && addr < seg_end)
+ return addr - seg_start + seg_offset;
+ }
+ pr_warn("elf: failed to find prog header containing 0x%lx in '%s'\n", addr, filename);
+ return -ENOENT;
+}
+
+/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
+static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
+{
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ GElf_Shdr sh;
+
+ if (!gelf_getshdr(scn, &sh))
+ continue;
+ if (sh.sh_type == sh_type)
+ return scn;
+ }
+ return NULL;
+}
+
+/* Find offset of function name in object specified by path. "name" matches
+ * symbol name or name@@LIB for library functions.
+ */
+static long elf_find_func_offset(const char *binary_path, const char *name)
+{
+ int fd, i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
+ bool is_shared_lib, is_name_qualified;
+ char errmsg[STRERR_BUFSIZE];
+ long ret = -ENOENT;
+ size_t name_len;
+ GElf_Ehdr ehdr;
+ Elf *elf;
+
+ fd = open(binary_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = -errno;
+ pr_warn("failed to open %s: %s\n", binary_path,
+ libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
+ return ret;
+ }
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
+ close(fd);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ if (!gelf_getehdr(elf, &ehdr)) {
+ pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+ /* for shared lib case, we do not need to calculate relative offset */
+ is_shared_lib = ehdr.e_type == ET_DYN;
+
+ name_len = strlen(name);
+ /* Does name specify "@@LIB"? */
+ is_name_qualified = strstr(name, "@@") != NULL;
+
+ /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
+ * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
+	 * linked binary may not have SHT_DYNSYM, so absence of a section should not be
+ * reported as a warning/error.
+ */
+ for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
+ size_t nr_syms, strtabidx, idx;
+ Elf_Data *symbols = NULL;
+ Elf_Scn *scn = NULL;
+ int last_bind = -1;
+ const char *sname;
+ GElf_Shdr sh;
+
+ scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL);
+ if (!scn) {
+ pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
+ binary_path);
+ continue;
+ }
+ if (!gelf_getshdr(scn, &sh))
+ continue;
+ strtabidx = sh.sh_link;
+ symbols = elf_getdata(scn, 0);
+ if (!symbols) {
+ pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
+ binary_path, elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+ nr_syms = symbols->d_size / sh.sh_entsize;
+
+ for (idx = 0; idx < nr_syms; idx++) {
+ int curr_bind;
+ GElf_Sym sym;
+
+ if (!gelf_getsym(symbols, idx, &sym))
+ continue;
+
+ if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
+ continue;
+
+ sname = elf_strptr(elf, strtabidx, sym.st_name);
+ if (!sname)
+ continue;
+
+ curr_bind = GELF_ST_BIND(sym.st_info);
+
+ /* User can specify func, func@@LIB or func@@LIB_VERSION. */
+ if (strncmp(sname, name, name_len) != 0)
+ continue;
+			/* ...but we don't want a search for "foo" to match "foo2" also, so any
+ * additional characters in sname should be of the form "@@LIB".
+ */
+ if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@')
+ continue;
+
+ if (ret >= 0) {
+ /* handle multiple matches */
+ if (last_bind != STB_WEAK && curr_bind != STB_WEAK) {
+ /* Only accept one non-weak bind. */
+ pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
+ sname, name, binary_path);
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ } else if (curr_bind == STB_WEAK) {
+ /* already have a non-weak bind, and
+ * this is a weak bind, so ignore.
+ */
+ continue;
+ }
+ }
+ ret = sym.st_value;
+ last_bind = curr_bind;
+ }
+ /* For binaries that are not shared libraries, we need relative offset */
+ if (ret > 0 && !is_shared_lib)
+ ret = elf_find_relative_offset(binary_path, elf, ret);
+ if (ret > 0)
+ break;
+ }
+
+ if (ret > 0) {
+ pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
+ ret);
+ } else {
+ if (ret == 0) {
+ pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
+ is_shared_lib ? "should not be 0 in a shared library" :
+ "try using shared library path instead");
+ ret = -ENOENT;
+ } else {
+ pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
+ }
+ }
+out:
+ elf_end(elf);
+ close(fd);
+ return ret;
+}
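To cross-check what this lookup returns, the following standalone sketch (not part of the patch; it only needs libelf, linked with -lelf) walks the same SHT_DYNSYM/SHT_SYMTAB data and prints every STT_FUNC symbol with its st_value:

    /* funcsyms.c -- build with: cc -o funcsyms funcsyms.c -lelf */
    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <gelf.h>
    #include <libelf.h>

    int main(int argc, char **argv)
    {
            Elf_Scn *scn = NULL;
            GElf_Shdr sh;
            Elf *elf;
            int fd;

            if (argc != 2)
                    errx(1, "usage: %s <binary>", argv[0]);
            if (elf_version(EV_CURRENT) == EV_NONE)
                    errx(1, "libelf initialization failed");
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    err(1, "open %s", argv[1]);
            elf = elf_begin(fd, ELF_C_READ, NULL);
            if (!elf)
                    errx(1, "elf_begin: %s", elf_errmsg(-1));

            while ((scn = elf_nextscn(elf, scn)) != NULL) {
                    Elf_Data *symbols;
                    size_t i, nr_syms;

                    if (!gelf_getshdr(scn, &sh))
                            continue;
                    if (sh.sh_type != SHT_DYNSYM && sh.sh_type != SHT_SYMTAB)
                            continue;
                    symbols = elf_getdata(scn, 0);
                    if (!symbols)
                            continue;
                    nr_syms = symbols->d_size / sh.sh_entsize;
                    for (i = 0; i < nr_syms; i++) {
                            const char *name;
                            GElf_Sym sym;

                            if (!gelf_getsym(symbols, i, &sym))
                                    continue;
                            if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
                                    continue;
                            name = elf_strptr(elf, sh.sh_link, sym.st_name);
                            if (!name)
                                    continue;
                            /* st_value is a virtual address; for non-PIE (ET_EXEC)
                             * binaries elf_find_func_offset() additionally converts
                             * it to a file offset via the program headers.
                             */
                            printf("0x%llx %s\n", (unsigned long long)sym.st_value, name);
                    }
            }
            elf_end(elf);
            close(fd);
            return 0;
    }

Comparing its output for a shared library with the offsets libbpf reports at debug log level is a quick way to sanity-check name-based uprobe attachment.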
+
+static const char *arch_specific_lib_paths(void)
+{
+ /*
+ * Based on https://packages.debian.org/sid/libc6.
+ *
+ * Assume that the traced program is built for the same architecture
+ * as libbpf, which should cover the vast majority of cases.
+ */
+#if defined(__x86_64__)
+ return "/lib/x86_64-linux-gnu";
+#elif defined(__i386__)
+ return "/lib/i386-linux-gnu";
+#elif defined(__s390x__)
+ return "/lib/s390x-linux-gnu";
+#elif defined(__s390__)
+ return "/lib/s390-linux-gnu";
+#elif defined(__arm__) && defined(__SOFTFP__)
+ return "/lib/arm-linux-gnueabi";
+#elif defined(__arm__) && !defined(__SOFTFP__)
+ return "/lib/arm-linux-gnueabihf";
+#elif defined(__aarch64__)
+ return "/lib/aarch64-linux-gnu";
+#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
+ return "/lib/mips64el-linux-gnuabi64";
+#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
+ return "/lib/mipsel-linux-gnu";
+#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return "/lib/powerpc64le-linux-gnu";
+#elif defined(__sparc__) && defined(__arch64__)
+ return "/lib/sparc64-linux-gnu";
+#elif defined(__riscv) && __riscv_xlen == 64
+ return "/lib/riscv64-linux-gnu";
+#else
+ return NULL;
+#endif
+}
+
+/* Get full path to program/shared library. */
+static int resolve_full_path(const char *file, char *result, size_t result_sz)
+{
+ const char *search_paths[3] = {};
+ int i;
+
+ if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
+ search_paths[0] = getenv("LD_LIBRARY_PATH");
+ search_paths[1] = "/usr/lib64:/usr/lib";
+ search_paths[2] = arch_specific_lib_paths();
+ } else {
+ search_paths[0] = getenv("PATH");
+ search_paths[1] = "/usr/bin:/usr/sbin";
+ }
+
+ for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
+ const char *s;
+
+ if (!search_paths[i])
+ continue;
+ for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
+ char *next_path;
+ int seg_len;
+
+ if (s[0] == ':')
+ s++;
+ next_path = strchr(s, ':');
+ seg_len = next_path ? next_path - s : strlen(s);
+ if (!seg_len)
+ continue;
+ snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
+ /* ensure it is an executable file/link */
+ if (access(result, R_OK | X_OK) < 0)
+ continue;
+ pr_debug("resolved '%s' to '%s'\n", file, result);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const char *binary_path, size_t func_offset,
@@ -10524,10 +10995,12 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
+ char full_binary_path[PATH_MAX];
struct bpf_link *link;
size_t ref_ctr_off;
int pfd, err;
bool retprobe, legacy;
+ const char *func_name;
if (!OPTS_VALID(opts, bpf_uprobe_opts))
return libbpf_err_ptr(-EINVAL);
@@ -10536,12 +11009,37 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
+ if (binary_path && !strchr(binary_path, '/')) {
+ err = resolve_full_path(binary_path, full_binary_path,
+ sizeof(full_binary_path));
+ if (err) {
+ pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
+ prog->name, binary_path, err);
+ return libbpf_err_ptr(err);
+ }
+ binary_path = full_binary_path;
+ }
+ func_name = OPTS_GET(opts, func_name, NULL);
+ if (func_name) {
+ long sym_off;
+
+ if (!binary_path) {
+ pr_warn("prog '%s': name-based attach requires binary_path\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+ sym_off = elf_find_func_offset(binary_path, func_name);
+ if (sym_off < 0)
+ return libbpf_err_ptr(sym_off);
+ func_offset += sym_off;
+ }
+
legacy = determine_uprobe_perf_type() < 0;
if (!legacy) {
pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
func_offset, pid, ref_ctr_off);
} else {
- char probe_name[512];
+ char probe_name[PATH_MAX + 64];
if (ref_ctr_off)
return libbpf_err_ptr(-EINVAL);
@@ -10589,6 +11087,60 @@ err_out:
}
+/* Format of u[ret]probe section definition supporting auto-attach:
+ * u[ret]probe/binary:function[+offset]
+ *
+ * binary can be an absolute/relative path or a filename; the latter is resolved to a
+ * full binary path via bpf_program__attach_uprobe_opts.
+ *
+ * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
+ * specified (and auto-attach is not possible) or the above format is specified for
+ * auto-attach.
+ */
+static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
+ int n, ret = -EINVAL;
+ long offset = 0;
+
+ *link = NULL;
+
+ n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[a-zA-Z0-9_.]+%li",
+ &probe_type, &binary_path, &func_name, &offset);
+ switch (n) {
+ case 1:
+ /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
+ ret = 0;
+ break;
+ case 2:
+ pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
+ prog->name, prog->sec_name);
+ break;
+ case 3:
+ case 4:
+ opts.retprobe = strcmp(probe_type, "uretprobe") == 0;
+ if (opts.retprobe && offset != 0) {
+ pr_warn("prog '%s': uretprobes do not support offset specification\n",
+ prog->name);
+ break;
+ }
+ opts.func_name = func_name;
+ *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
+ ret = libbpf_get_error(*link);
+ break;
+ default:
+ pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
+ prog->sec_name);
+ break;
+ }
+ free(probe_type);
+ free(binary_path);
+ free(func_name);
+
+ return ret;
+}
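For illustration, BPF-side programs that this parser can auto-attach could look like the sketch below; libc.so.6:malloc is a real symbol resolved through the path logic above, while ./my_app and compute are made-up names, and the +16 form shows the optional offset:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    SEC("uprobe/libc.so.6:malloc")
    int trace_malloc_entry(struct pt_regs *ctx)
    {
            return 0;
    }

    SEC("uprobe/./my_app:compute+16")
    int trace_compute_mid(struct pt_regs *ctx)
    {
            return 0;
    }

    SEC("uretprobe/./my_app:compute")
    int trace_compute_exit(struct pt_regs *ctx)
    {
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";

A plain SEC("uprobe") program remains valid but is skipped by auto-attach, matching case 1 above.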
+
struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
bool retprobe, pid_t pid,
const char *binary_path,
@@ -10599,6 +11151,85 @@ struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
}
+struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
+ pid_t pid, const char *binary_path,
+ const char *usdt_provider, const char *usdt_name,
+ const struct bpf_usdt_opts *opts)
+{
+ char resolved_path[512];
+ struct bpf_object *obj = prog->obj;
+ struct bpf_link *link;
+ __u64 usdt_cookie;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_usdt_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ if (bpf_program__fd(prog) < 0) {
+ pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ if (!strchr(binary_path, '/')) {
+ err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
+ if (err) {
+ pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
+ prog->name, binary_path, err);
+ return libbpf_err_ptr(err);
+ }
+ binary_path = resolved_path;
+ }
+
+ /* USDT manager is instantiated lazily on first USDT attach. It will
+ * be destroyed together with BPF object in bpf_object__close().
+ */
+ if (IS_ERR(obj->usdt_man))
+ return libbpf_ptr(obj->usdt_man);
+ if (!obj->usdt_man) {
+ obj->usdt_man = usdt_manager_new(obj);
+ if (IS_ERR(obj->usdt_man))
+ return libbpf_ptr(obj->usdt_man);
+ }
+
+ usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
+ link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
+ usdt_provider, usdt_name, usdt_cookie);
+ err = libbpf_get_error(link);
+ if (err)
+ return libbpf_err_ptr(err);
+ return link;
+}
+
+static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ char *path = NULL, *provider = NULL, *name = NULL;
+ const char *sec_name;
+ int n, err;
+
+ sec_name = bpf_program__section_name(prog);
+ if (strcmp(sec_name, "usdt") == 0) {
+ /* no auto-attach for just SEC("usdt") */
+ *link = NULL;
+ return 0;
+ }
+
+ n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
+ if (n != 3) {
+ pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
+ sec_name);
+ err = -EINVAL;
+ } else {
+ *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
+ provider, name, NULL);
+ err = libbpf_get_error(*link);
+ }
+ free(path);
+ free(provider);
+ free(name);
+ return err;
+}
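A matching BPF-side program that this parser would auto-attach might look like the sketch below; the path, provider and probe names are hypothetical, and BPF_USDT() comes from the usdt.bpf.h header added later in this patch:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/usdt.bpf.h>

    SEC("usdt/./my_app:my_provider:my_probe")
    int BPF_USDT(handle_my_probe, long arg0)
    {
            bpf_printk("my_probe fired, arg0=%ld", arg0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";

With such a section name, skeleton attach (or a generic bpf_program__attach() call) needs no explicit paths in user-space code.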
+
static int determine_tracepoint_id(const char *tp_category,
const char *tp_name)
{
@@ -10791,7 +11422,8 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
- pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
+ /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
+ pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), NULL);
if (pfd < 0) {
pfd = -errno;
free(link);
@@ -10800,7 +11432,7 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
return libbpf_err_ptr(pfd);
}
link->fd = pfd;
- return (struct bpf_link *)link;
+ return link;
}
struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
@@ -12211,7 +12843,7 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
- if (!prog->load)
+ if (!prog->autoload)
continue;
/* auto-attaching not supported for this program */
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 05dde85e19a6..cdbfee60ea3e 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -378,7 +378,31 @@ struct bpf_link;
LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
+/**
+ * @brief **bpf_link__pin()** pins the BPF link to a file
+ * in the BPF FS specified by a path. This increments the links
+ * reference count, allowing it to stay loaded after the process
+ * which loaded it has exited.
+ *
+ * @param link BPF link to pin, must already be loaded
+ * @param path file path in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
+
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
+
+/**
+ * @brief **bpf_link__unpin()** unpins the BPF link from a file
+ * in the BPF FS specified by a path. This decrements the link's
+ * reference count.
+ *
+ * The file pinning the BPF link can also be unlinked by a different
+ * process in which case this function will return an error.
+ *
+ * @param link BPF link to unpin
+ * @param path file path to the pin in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
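A minimal usage sketch of the pin/unpin pair documented above; prog is assumed to be a loaded struct bpf_program *, and the pin path is illustrative (it must live on a mounted bpffs such as /sys/fs/bpf):

    struct bpf_link *link;
    int err;

    link = bpf_program__attach(prog);
    if (!link)
            return -errno;

    err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
    if (!err) {
            /* the pinned link now outlives this process until unpinned/unlinked */
            err = bpf_link__unpin(link);
    }
    bpf_link__destroy(link);
    return err;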
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
struct bpf_program *prog);
@@ -386,6 +410,22 @@ LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
+/**
+ * @brief **bpf_program__attach()** is a generic function for attaching
+ * a BPF program based on auto-detection of program type, attach type,
+ * and extra parameters, where applicable.
+ *
+ * @param prog BPF program to attach
+ * @return Reference to the newly created BPF link; on error, NULL is returned
+ * and the error code is stored in errno
+ *
+ * This is supported for:
+ * - kprobe/kretprobe (depends on SEC() definition)
+ * - uprobe/uretprobe (depends on SEC() definition)
+ * - tracepoint
+ * - raw tracepoint
+ * - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
+ */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);
@@ -459,9 +499,17 @@ struct bpf_uprobe_opts {
__u64 bpf_cookie;
/* uprobe is return probe, invoked at function return time */
bool retprobe;
+ /* Function name to attach to. Could be an unqualified ("abc") or library-qualified
+ * "abc@LIBXYZ" name. To specify function entry, func_name should be set while
+ * func_offset argument to bpf_prog__attach_uprobe_opts() should be 0. To trace an
+ * offset within a function, specify func_name and use func_offset argument to specify
+ * offset within the function. Shared library functions must specify the shared library
+ * binary_path.
+ */
+ const char *func_name;
size_t :0;
};
-#define bpf_uprobe_opts__last_field retprobe
+#define bpf_uprobe_opts__last_field func_name
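A hedged sketch of name-based attachment using this field; prog is a loaded struct bpf_program *, the bare library name is resolved via LD_LIBRARY_PATH and the standard library directories, and the version-qualified form (version string shown is illustrative) is only needed when several versions of the symbol are exported:

    DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts,
            .func_name = "malloc",    /* or, e.g., "malloc@@GLIBC_2.17" */
    );
    struct bpf_link *link;

    link = bpf_program__attach_uprobe_opts(prog, -1 /* all processes */,
                                           "libc.so.6", 0 /* func_offset */,
                                           &uprobe_opts);
    if (!link)
            fprintf(stderr, "uprobe attach failed: %d\n", -errno);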
/**
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
@@ -503,6 +551,37 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const char *binary_path, size_t func_offset,
const struct bpf_uprobe_opts *opts);
+struct bpf_usdt_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value accessible through usdt_cookie() */
+ __u64 usdt_cookie;
+ size_t :0;
+};
+#define bpf_usdt_opts__last_field usdt_cookie
+
+/**
+ * @brief **bpf_program__attach_usdt()** is just like
+ * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
+ * Statically Defined Tracepoint) attachment, instead of attaching to
+ * user-space function entry or exit.
+ *
+ * @param prog BPF program to attach
+ * @param pid Process ID to attach the uprobe to, 0 for self (own process),
+ * -1 for all processes
+ * @param binary_path Path to binary that contains provided USDT probe
+ * @param usdt_provider USDT provider name
+ * @param usdt_name USDT probe name
+ * @param opts Options for altering program attachment
+ * @return Reference to the newly created BPF link; on error, NULL is returned
+ * and the error code is stored in errno
+ */
+LIBBPF_API struct bpf_link *
+bpf_program__attach_usdt(const struct bpf_program *prog,
+ pid_t pid, const char *binary_path,
+ const char *usdt_provider, const char *usdt_name,
+ const struct bpf_usdt_opts *opts);
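A corresponding user-space attachment sketch; skel is a generated skeleton, and the binary, provider and probe names are hypothetical:

    DECLARE_LIBBPF_OPTS(bpf_usdt_opts, usdt_opts, .usdt_cookie = 0x100);
    struct bpf_link *link;

    link = bpf_program__attach_usdt(skel->progs.handle_my_probe,
                                    -1 /* any process */, "./my_app",
                                    "my_provider", "my_probe", &usdt_opts);
    if (!link)
            fprintf(stderr, "USDT attach failed: %d\n", -errno);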
+
struct bpf_tracepoint_opts {
/* size of this struct, for forward/backward compatibility */
size_t sz;
@@ -647,12 +726,37 @@ LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);
LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);
-LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
- enum bpf_prog_type type);
+
+/**
+ * @brief **bpf_program__set_type()** sets the program
+ * type of the passed BPF program.
+ * @param prog BPF program to set the program type for
+ * @param type program type to set
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
+ *
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
+ */
+LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
+ enum bpf_prog_type type);
LIBBPF_API enum bpf_attach_type
bpf_program__expected_attach_type(const struct bpf_program *prog);
-LIBBPF_API void
+
+/**
+ * @brief **bpf_program__set_expected_attach_type()** sets the
+ * attach type of the passed BPF program. This is used for
+ * auto-detection of attachment when programs are loaded.
+ * @param prog BPF program to set the attach type for
+ * @param type attach type to set
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
+ *
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
+ */
+LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
@@ -668,6 +772,17 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
+/**
+ * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
+ * for supported BPF program types:
+ * - BTF-aware raw tracepoints (tp_btf);
+ * - fentry/fexit/fmod_ret;
+ * - lsm;
+ * - freplace.
+ * @param prog BPF program to set the attach target for
+ * @param attach_prog_fd FD of the target BPF program (for freplace or
+ * fentry/fexit on another BPF program), or zero to attach to a kernel target
+ * @param attach_func_name name of the kernel or target program function to attach to
+ * @return error code; or 0 if no error occurred.
+ */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
const char *attach_func_name);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index dd35ee58bfaa..82f6d62176dd 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -444,6 +444,7 @@ LIBBPF_0.8.0 {
global:
bpf_object__destroy_subskeleton;
bpf_object__open_subskeleton;
+ bpf_program__attach_usdt;
libbpf_register_prog_handler;
libbpf_unregister_prog_handler;
bpf_program__attach_kprobe_multi_opts;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index b6247dc7f8eb..4abdbe2fea9d 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -103,6 +103,17 @@
#define str_has_pfx(str, pfx) \
(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
+/* suffix check */
+static inline bool str_has_sfx(const char *str, const char *sfx)
+{
+ size_t str_len = strlen(str);
+ size_t sfx_len = strlen(sfx);
+
+ if (sfx_len <= str_len)
+ return strcmp(str + str_len - sfx_len, sfx) == 0;
+ return false;
+}
+
/* Symbol versioning is different between static and shared library.
* Properly versioned symbols are needed for shared library, but
* only the symbol of the new version is needed for static library.
@@ -148,6 +159,15 @@ do { \
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
+
+struct bpf_link {
+ int (*detach)(struct bpf_link *link);
+ void (*dealloc)(struct bpf_link *link);
+ char *pin_path; /* NULL, if not pinned */
+ int fd; /* hook FD, -1 if not applicable */
+ bool disconnected;
+};
+
/*
* Re-implement glibc's reallocarray() for libbpf internal-only use.
* reallocarray(), unfortunately, is not available in all versions of glibc,
@@ -329,6 +349,8 @@ enum kern_feature_id {
FEAT_BTF_TYPE_TAG,
/* memcg-based accounting for BPF maps and progs */
FEAT_MEMCG_ACCOUNT,
+ /* BPF cookie (bpf_get_attach_cookie() BPF helper) support */
+ FEAT_BPF_COOKIE,
__FEAT_CNT,
};
@@ -354,6 +376,13 @@ struct btf_ext_info {
void *info;
__u32 rec_size;
__u32 len;
+ /* optional (maintained internally by libbpf) mapping between .BTF.ext
+ * section and corresponding ELF section. This is used to join
+ * information like CO-RE relocation records with corresponding BPF
+ * programs defined in ELF sections
+ */
+ __u32 *sec_idxs;
+ int sec_cnt;
};
#define for_each_btf_ext_sec(seg, sec) \
@@ -543,4 +572,12 @@ int bpf_core_add_cands(struct bpf_core_cand *local_cand,
struct bpf_core_cand_list *cands);
void bpf_core_free_cands(struct bpf_core_cand_list *cands);
+struct usdt_manager *usdt_manager_new(struct bpf_object *obj);
+void usdt_manager_free(struct usdt_manager *man);
+struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man,
+ const struct bpf_program *prog,
+ pid_t pid, const char *path,
+ const char *usdt_provider, const char *usdt_name,
+ __u64 usdt_cookie);
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index f946f23eab20..ba4453dfd1ed 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -178,29 +178,28 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
* Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
* string to specify enumerator's value index that need to be relocated.
*/
-static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
- __u32 type_id,
- const char *spec_str,
- enum bpf_core_relo_kind relo_kind,
- struct bpf_core_spec *spec)
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec)
{
int access_idx, parsed_len, i;
struct bpf_core_accessor *acc;
const struct btf_type *t;
- const char *name;
+ const char *name, *spec_str;
__u32 id;
__s64 sz;
+ spec_str = btf__name_by_offset(btf, relo->access_str_off);
if (str_is_empty(spec_str) || *spec_str == ':')
return -EINVAL;
memset(spec, 0, sizeof(*spec));
spec->btf = btf;
- spec->root_type_id = type_id;
- spec->relo_kind = relo_kind;
+ spec->root_type_id = relo->type_id;
+ spec->relo_kind = relo->kind;
/* type-based relocations don't have a field access string */
- if (core_relo_is_type_based(relo_kind)) {
+ if (core_relo_is_type_based(relo->kind)) {
if (strcmp(spec_str, "0"))
return -EINVAL;
return 0;
@@ -221,7 +220,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
if (spec->raw_len == 0)
return -EINVAL;
- t = skip_mods_and_typedefs(btf, type_id, &id);
+ t = skip_mods_and_typedefs(btf, relo->type_id, &id);
if (!t)
return -EINVAL;
@@ -231,7 +230,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
acc->idx = access_idx;
spec->len++;
- if (core_relo_is_enumval_based(relo_kind)) {
+ if (core_relo_is_enumval_based(relo->kind)) {
if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
return -EINVAL;
@@ -240,7 +239,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
return 0;
}
- if (!core_relo_is_field_based(relo_kind))
+ if (!core_relo_is_field_based(relo->kind))
return -EINVAL;
sz = btf__resolve_size(btf, id);
@@ -301,7 +300,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
spec->bit_offset += access_idx * sz * 8;
} else {
pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
- prog_name, type_id, spec_str, i, id, btf_kind_str(t));
+ prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
return -EINVAL;
}
}
@@ -1055,51 +1054,66 @@ poison:
* [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
* where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
*/
-static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
{
const struct btf_type *t;
const struct btf_enum *e;
const char *s;
__u32 type_id;
- int i;
+ int i, len = 0;
+
+#define append_buf(fmt, args...) \
+ ({ \
+ int r; \
+ r = snprintf(buf, buf_sz, fmt, ##args); \
+ len += r; \
+ if (r >= buf_sz) \
+ r = buf_sz; \
+ buf += r; \
+ buf_sz -= r; \
+ })
type_id = spec->root_type_id;
t = btf_type_by_id(spec->btf, type_id);
s = btf__name_by_offset(spec->btf, t->name_off);
- libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
+ append_buf("<%s> [%u] %s %s",
+ core_relo_kind_str(spec->relo_kind),
+ type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
if (core_relo_is_type_based(spec->relo_kind))
- return;
+ return len;
if (core_relo_is_enumval_based(spec->relo_kind)) {
t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
e = btf_enum(t) + spec->raw_spec[0];
s = btf__name_by_offset(spec->btf, e->name_off);
- libbpf_print(level, "::%s = %u", s, e->val);
- return;
+ append_buf("::%s = %u", s, e->val);
+ return len;
}
if (core_relo_is_field_based(spec->relo_kind)) {
for (i = 0; i < spec->len; i++) {
if (spec->spec[i].name)
- libbpf_print(level, ".%s", spec->spec[i].name);
+ append_buf(".%s", spec->spec[i].name);
else if (i > 0 || spec->spec[i].idx > 0)
- libbpf_print(level, "[%u]", spec->spec[i].idx);
+ append_buf("[%u]", spec->spec[i].idx);
}
- libbpf_print(level, " (");
+ append_buf(" (");
for (i = 0; i < spec->raw_len; i++)
- libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
+ append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
if (spec->bit_offset % 8)
- libbpf_print(level, " @ offset %u.%u)",
- spec->bit_offset / 8, spec->bit_offset % 8);
+ append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
else
- libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
- return;
+ append_buf(" @ offset %u)", spec->bit_offset / 8);
+ return len;
}
+
+ return len;
+#undef append_buf
}
/*
@@ -1167,7 +1181,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
const struct btf_type *local_type;
const char *local_name;
__u32 local_id;
- const char *spec_str;
+ char spec_buf[256];
int i, j, err;
local_id = relo->type_id;
@@ -1176,24 +1190,20 @@ int bpf_core_calc_relo_insn(const char *prog_name,
if (!local_name)
return -EINVAL;
- spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
- if (str_is_empty(spec_str))
- return -EINVAL;
-
- err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str,
- relo->kind, local_spec);
+ err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
if (err) {
+ const char *spec_str;
+
+ spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
prog_name, relo_idx, local_id, btf_kind_str(local_type),
str_is_empty(local_name) ? "<anon>" : local_name,
- spec_str, err);
+ spec_str ?: "<?>", err);
return -EINVAL;
}
- pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
- relo_idx, core_relo_kind_str(relo->kind), relo->kind);
- bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec);
- libbpf_print(LIBBPF_DEBUG, "\n");
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
+ pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);
/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
@@ -1207,7 +1217,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
}
/* libbpf doesn't support candidate search for anonymous types */
- if (str_is_empty(spec_str)) {
+ if (str_is_empty(local_name)) {
pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
return -EOPNOTSUPP;
@@ -1217,17 +1227,15 @@ int bpf_core_calc_relo_insn(const char *prog_name,
err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
cands->cands[i].id, cand_spec);
if (err < 0) {
- pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
- prog_name, relo_idx, i);
- bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec);
- libbpf_print(LIBBPF_WARN, ": %d\n", err);
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
+ prog_name, relo_idx, i, spec_buf, err);
return err;
}
- pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
- relo_idx, err == 0 ? "non-matching" : "matching", i);
- bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec);
- libbpf_print(LIBBPF_DEBUG, "\n");
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
+ relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);
if (err == 0)
continue;
diff --git a/tools/lib/bpf/relo_core.h b/tools/lib/bpf/relo_core.h
index a28bf3711ce2..073039d8ca4f 100644
--- a/tools/lib/bpf/relo_core.h
+++ b/tools/lib/bpf/relo_core.h
@@ -84,4 +84,10 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
int insn_idx, const struct bpf_core_relo *relo,
int relo_idx, const struct bpf_core_relo_res *res);
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec);
+
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec);
+
#endif
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
new file mode 100644
index 000000000000..4181fddb3687
--- /dev/null
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef __USDT_BPF_H__
+#define __USDT_BPF_H__
+
+#include <linux/errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/* Below types and maps are internal implementation details of libbpf's USDT
+ * support and are subject to change. Also, bpf_usdt_xxx() API helpers should
+ * be considered an unstable API as well and might be adjusted based on user
+ * feedback from using libbpf's USDT support in production.
+ */
+
+/* User can override BPF_USDT_MAX_SPEC_CNT to change default size of internal
+ * map that keeps track of USDT argument specifications. This might be
+ * necessary if there are a lot of USDT attachments.
+ */
+#ifndef BPF_USDT_MAX_SPEC_CNT
+#define BPF_USDT_MAX_SPEC_CNT 256
+#endif
+/* User can override BPF_USDT_MAX_IP_CNT to change default size of internal
+ * map that keeps track of IP (memory address) mapping to USDT argument
+ * specification.
+ * Note, if kernel supports BPF cookies, this map is not used and could be
+ * resized all the way to 1 to save a bit of memory.
+ */
+#ifndef BPF_USDT_MAX_IP_CNT
+#define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT)
+#endif
+/* We use BPF CO-RE to detect support for BPF cookie from BPF side. This is
+ * the only dependency on CO-RE, so if it's undesirable, user can override
+ * BPF_USDT_HAS_BPF_COOKIE to specify whether BPF cookie is supported or not.
+ */
+#ifndef BPF_USDT_HAS_BPF_COOKIE
+#define BPF_USDT_HAS_BPF_COOKIE \
+ bpf_core_enum_value_exists(enum bpf_func_id___usdt, BPF_FUNC_get_attach_cookie___usdt)
+#endif
+
+enum __bpf_usdt_arg_type {
+ BPF_USDT_ARG_CONST,
+ BPF_USDT_ARG_REG,
+ BPF_USDT_ARG_REG_DEREF,
+};
+
+struct __bpf_usdt_arg_spec {
+ /* u64 scalar interpreted depending on arg_type, see below */
+ __u64 val_off;
+ /* arg location case, see bpf_usdt_arg() for details */
+ enum __bpf_usdt_arg_type arg_type;
+ /* offset of referenced register within struct pt_regs */
+ short reg_off;
+ /* whether arg should be interpreted as signed value */
+ bool arg_signed;
+ /* number of bits that need to be cleared and, optionally,
+ * sign-extended to cast arguments that are 1, 2, or 4 bytes
+ * long into final 8-byte u64/s64 value returned to user
+ */
+ char arg_bitshift;
+};
+
+/* should match USDT_MAX_ARG_CNT in usdt.c exactly */
+#define BPF_USDT_MAX_ARG_CNT 12
+struct __bpf_usdt_spec {
+ struct __bpf_usdt_arg_spec args[BPF_USDT_MAX_ARG_CNT];
+ __u64 usdt_cookie;
+ short arg_cnt;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, BPF_USDT_MAX_SPEC_CNT);
+ __type(key, int);
+ __type(value, struct __bpf_usdt_spec);
+} __bpf_usdt_specs SEC(".maps") __weak;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, BPF_USDT_MAX_IP_CNT);
+ __type(key, long);
+ __type(value, __u32);
+} __bpf_usdt_ip_to_spec_id SEC(".maps") __weak;
+
+/* don't rely on user's BPF code to have latest definition of bpf_func_id */
+enum bpf_func_id___usdt {
+ BPF_FUNC_get_attach_cookie___usdt = 0xBAD, /* value doesn't matter */
+};
+
+static __always_inline
+int __bpf_usdt_spec_id(struct pt_regs *ctx)
+{
+ if (!BPF_USDT_HAS_BPF_COOKIE) {
+ long ip = PT_REGS_IP(ctx);
+ int *spec_id_ptr;
+
+ spec_id_ptr = bpf_map_lookup_elem(&__bpf_usdt_ip_to_spec_id, &ip);
+ return spec_id_ptr ? *spec_id_ptr : -ESRCH;
+ }
+
+ return bpf_get_attach_cookie(ctx);
+}
+
+/* Return number of USDT arguments defined for currently traced USDT. */
+__weak __hidden
+int bpf_usdt_arg_cnt(struct pt_regs *ctx)
+{
+ struct __bpf_usdt_spec *spec;
+ int spec_id;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return -ESRCH;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return -ESRCH;
+
+ return spec->arg_cnt;
+}
+
+/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res.
+ * Returns 0 on success; negative error, otherwise.
+ * On error *res is guaranteed to be set to zero.
+ */
+__weak __hidden
+int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
+{
+ struct __bpf_usdt_spec *spec;
+ struct __bpf_usdt_arg_spec *arg_spec;
+ unsigned long val;
+ int err, spec_id;
+
+ *res = 0;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return -ESRCH;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return -ESRCH;
+
+ if (arg_num >= BPF_USDT_MAX_ARG_CNT || arg_num >= spec->arg_cnt)
+ return -ENOENT;
+
+ arg_spec = &spec->args[arg_num];
+ switch (arg_spec->arg_type) {
+ case BPF_USDT_ARG_CONST:
+ /* Arg is just a constant ("-4@$-9" in USDT arg spec).
+ * value is recorded in arg_spec->val_off directly.
+ */
+ val = arg_spec->val_off;
+ break;
+ case BPF_USDT_ARG_REG:
+ /* Arg is in a register (e.g, "8@%rax" in USDT arg spec),
+ * so we read the contents of that register directly from
+ * struct pt_regs. To keep things simple user-space parts
+ * record offsetof(struct pt_regs, <regname>) in arg_spec->reg_off.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ break;
+ case BPF_USDT_ARG_REG_DEREF:
+ /* Arg is in memory addressed by register, plus some offset
+ * (e.g., "-4@-1204(%rbp)" in USDT arg spec). Register is
+ * identified like with BPF_USDT_ARG_REG case, and the offset
+ * is in arg_spec->val_off. We first fetch register contents
+ * from pt_regs, then do another user-space probe read to
+ * fetch argument value itself.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ err = bpf_probe_read_user(&val, sizeof(val), (void *)val + arg_spec->val_off);
+ if (err)
+ return err;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ val >>= arg_spec->arg_bitshift;
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* cast arg from 1, 2, or 4 bytes to final 8 byte size clearing
+ * necessary upper arg_bitshift bits, with sign extension if argument
+ * is signed
+ */
+ val <<= arg_spec->arg_bitshift;
+ if (arg_spec->arg_signed)
+ val = ((long)val) >> arg_spec->arg_bitshift;
+ else
+ val = val >> arg_spec->arg_bitshift;
+ *res = val;
+ return 0;
+}
+
+/* Retrieve user-specified cookie value provided during attach as
+ * bpf_usdt_opts.usdt_cookie. This serves the same purpose as BPF cookie
+ * returned by bpf_get_attach_cookie(). Libbpf's support for USDT is itself
+ * utilizing BPF cookies internally, so user can't use BPF cookie directly
+ * for USDT programs and has to use bpf_usdt_cookie() API instead.
+ */
+__weak __hidden
+long bpf_usdt_cookie(struct pt_regs *ctx)
+{
+ struct __bpf_usdt_spec *spec;
+ int spec_id;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return 0;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return 0;
+
+ return spec->usdt_cookie;
+}
+
+/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */
+#define ___bpf_usdt_args0() ctx
+#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); (void *)_x; })
+#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); (void *)_x; })
+#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); (void *)_x; })
+#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); (void *)_x; })
+#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); (void *)_x; })
+#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); (void *)_x; })
+#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); (void *)_x; })
+#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); (void *)_x; })
+#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); (void *)_x; })
+#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); (void *)_x; })
+#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); (void *)_x; })
+#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); (void *)_x; })
+#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_USDT serves the same purpose for USDT handlers as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs and BPF_KPROBE for kprobes.
+ * Original struct pt_regs * context is preserved as 'ctx' argument.
+ */
+#define BPF_USDT(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_usdt_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
+#endif /* __USDT_BPF_H__ */
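A hedged sketch of a consumer of this header that uses the helpers directly rather than the BPF_USDT() wrapper; such a program uses a plain SEC("usdt") section and is attached explicitly from user space (e.g. via bpf_program__attach_usdt()):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/usdt.bpf.h>

    SEC("usdt")
    int handle_any_usdt(struct pt_regs *ctx)
    {
            long arg0 = 0;
            int cnt;

            cnt = bpf_usdt_arg_cnt(ctx);
            if (cnt > 0)
                    bpf_usdt_arg(ctx, 0, &arg0);

            bpf_printk("cookie=%ld args=%d", bpf_usdt_cookie(ctx), cnt);
            bpf_printk("arg0=%ld", arg0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";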
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
new file mode 100644
index 000000000000..f1c9339cfbbc
--- /dev/null
+++ b/tools/lib/bpf/usdt.c
@@ -0,0 +1,1518 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <unistd.h>
+#include <linux/ptrace.h>
+#include <linux/kernel.h>
+
+/* s8 will be marked as poison while it's a reg of riscv */
+#if defined(__riscv)
+#define rv_s8 s8
+#endif
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_common.h"
+#include "libbpf_internal.h"
+#include "hashmap.h"
+
+/* libbpf's USDT support consists of BPF-side state/code and user-space
+ * state/code working together in concert. BPF-side parts are defined in
+ * usdt.bpf.h header library. User-space state is encapsulated by struct
+ * usdt_manager and all the supporting code centered around usdt_manager.
+ *
+ * usdt.bpf.h defines two BPF maps that usdt_manager expects: USDT spec map
+ * and IP-to-spec-ID map, which is auxiliary map necessary for kernels that
+ * don't support BPF cookie (see below). These two maps are implicitly
+ * embedded into user's end BPF object file when user's code included
+ * usdt.bpf.h. This means that libbpf doesn't do anything special to create
+ * these USDT support maps. They are created by normal libbpf logic of
+ * instantiating BPF maps when opening and loading BPF object.
+ *
+ * As such, libbpf is basically unaware of the need to do anything
+ * USDT-related until the very first call to bpf_program__attach_usdt(), which
+ * can be called by user explicitly or happen automatically during skeleton
+ * attach (or, equivalently, through generic bpf_program__attach() call). At
+ * this point, libbpf will instantiate and initialize struct usdt_manager and
+ * store it in bpf_object. USDT manager is per-BPF object construct, as each
+ * independent BPF object might or might not have USDT programs, and thus all
+ * the expected USDT-related state. There is no coordination between two
+ * bpf_object in parts of USDT attachment, they are oblivious of each other's
+ * existence and libbpf is just oblivious, dealing with bpf_object-specific
+ * USDT state.
+ *
+ * Quick crash course on USDTs.
+ *
+ * From user-space application's point of view, USDT is essentially just
+ * a slightly special function call that normally has zero overhead, unless it
+ * is being traced by some external entity (e.g., a BPF-based tool). Here's how
+ * a typical application can trigger USDT probe:
+ *
+ * #include <sys/sdt.h> // provided by systemtap-sdt-devel package
+ * // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
+ *
+ * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
+ *
+ * USDT is identified by its <provider-name>:<probe-name> pair of names. Each
+ * individual USDT has a fixed number of arguments (3 in the above example)
+ * and specifies values of each argument as if it was a function call.
+ *
+ * USDT call is actually not a function call, but is instead replaced by
+ * a single NOP instruction (thus zero overhead, effectively). But in addition
+ * to that, those USDT macros generate special SHT_NOTE ELF records in
+ * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
+ * `readelf -n <binary>`:
+ *
+ * stapsdt 0x00000089 NT_STAPSDT (SystemTap probe descriptors)
+ * Provider: test
+ * Name: usdt12
+ * Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
+ * Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
+ *
+ * In this case we have USDT test:usdt12 with 12 arguments.
+ *
+ * Location and base are offsets used to calculate absolute IP address of that
+ * NOP instruction that kernel can replace with an interrupt instruction to
+ * trigger instrumentation code (BPF program for all that we care about).
+ *
+ * The semaphore above is an optional feature. It records an address of a 2-byte
+ * refcount variable (normally in '.probes' ELF section) used for signaling if
+ * there is anything that is attached to USDT. This is useful for user
+ * applications if, for example, they need to prepare some arguments that are
+ * passed only to USDTs and preparation is expensive. By checking if USDT is
+ * "activated", an application can avoid paying those costs unnecessarily.
+ * A recent enough kernel has built-in support for automatically managing this
+ * refcount, which libbpf expects and relies on. If USDT is defined without
+ * associated semaphore, this value will be zero. See selftests for semaphore
+ * examples.
+ *
+ * Arguments is the most interesting part. This USDT specification string is
+ * providing information about all the USDT arguments and their locations. The
+ * part before the @ sign defines the byte size of the argument (1, 2, 4, or 8) and
+ * whether the argument is signed or unsigned (negative size means signed).
+ * The part after the @ sign is an assembly-like definition of the argument location
+ * (see [0] for more details). Technically, assembler can provide some pretty
+ * advanced definitions, but libbpf is currently supporting three most common
+ * cases:
+ * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
+ * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
+ * whose value is in register %rdx";
+ * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
+ * specifies signed 32-bit integer stored at offset -1204 bytes from
+ * memory address stored in %rbp.
+ *
+ * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ *
+ * During attachment, libbpf parses all the relevant USDT specifications and
+ * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
+ * code through the spec map. This allows BPF applications to quickly fetch
+ * actual argument values at runtime using simple BPF-side code.
+ *
+ * With basics out of the way, let's go over less immediately obvious aspects
+ * of supporting USDTs.
+ *
+ * First, there is no special USDT BPF program type. It is actually just
+ * a uprobe BPF program (which for kernel, at least currently, is just a kprobe
+ * program, so BPF_PROG_TYPE_KPROBE program type). With the only difference
+ * that uprobe is usually attached at the function entry, while USDT will
+ * normally be somewhere inside the function. But it should always be
+ * pointing to NOP instruction, which makes such uprobes the fastest uprobe
+ * kind.
+ *
+ * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
+ * macro invocations can end up being inlined many, many times, depending on
+ * specifics of each individual user application. So a single conceptual USDT
+ * (identified by its provider:name pair) corresponds, generally speaking, to
+ * multiple uprobe locations (USDT call sites) in different places in the user
+ * application. Further, again due to inlining, each USDT call site might end
+ * up having the same argument #N be located in a different place. In one call
+ * site it could be a constant, in another it will end up in a register, and in
+ * yet another could be some other register or even somewhere on the stack.
+ *
+ * As such, "attaching to USDT" means (in general case) attaching the same
+ * uprobe BPF program to multiple target locations in user application, each
+ * potentially having a completely different USDT spec associated with it.
+ * To wire all this up together libbpf allocates a unique integer spec ID for
+ * each unique USDT spec. Spec IDs are allocated as sequential small integers
+ * so that they can be used as keys in array BPF map (for performance reasons).
+ * Spec ID allocation and accounting is a big part of what usdt_manager is
+ * about. This state has to be maintained per BPF object and coordinated
+ * between different USDT attachments within the same BPF object.
+ *
+ * Spec ID is the key in the spec BPF map, and the value is the actual USDT spec laid out
+ * as struct usdt_spec. Each invocation of BPF program at runtime needs to
+ * know its associated spec ID. It gets it either through BPF cookie, which
+ * libbpf sets to spec ID during attach time, or, if kernel is too old to
+ * support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such
+ * case. The latter means that some modes of operation can't be supported
+ * without BPF cookie. One such mode is attaching to a shared library "generically",
+ * without specifying a target process. In that case, it's impossible to
+ * calculate absolute IP addresses for the IP-to-spec-ID map, and thus such mode
+ * is not supported without BPF cookie support.
+ *
+ * Note that libbpf is using BPF cookie functionality for its own internal
+ * needs, so users themselves can't rely on the BPF cookie feature. To that end, libbpf
+ * provides conceptually equivalent USDT cookie support. It's still u64
+ * user-provided value that can be associated with USDT attachment. Note that
+ * this will be the same value for all USDT call sites within the same single
+ * *logical* USDT attachment. This makes sense because to the user attaching to
+ * a USDT is a single BPF program triggered for a singular USDT probe. The fact
+ * that this is done at multiple actual locations is a mostly hidden
+ * implementation detail. This USDT cookie value can be fetched with the
+ * bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
+ *
+ * Lastly, while a single USDT can have tons of USDT call sites, it doesn't
+ * necessarily have that many different USDT specs. It very well might be
+ * that 1000 USDT call sites only need 5 different USDT specs, because all the
+ * arguments are typically contained in a small set of registers or stack
+ * locations. As such, it's wasteful to allocate as many USDT spec IDs as
+ * there are USDT call sites. So libbpf tries to be frugal and performs
+ * on-the-fly deduplication during a single USDT attachment to only allocate
+ * the minimal required amount of unique USDT specs (and thus spec IDs). This
+ * is trivially achieved by using USDT spec string (Arguments string from USDT
+ * note) as a lookup key in a hashmap. USDT spec string uniquely defines
+ * everything about how to fetch USDT arguments, so two USDT call sites
+ * sharing USDT spec string can safely share the same USDT spec and spec ID.
+ * Note, this spec string deduplication is happening only during the same USDT
+ * attachment, so each USDT spec shares the same USDT cookie value. This is
+ * not generally true for other USDT attachments within the same BPF object,
+ * as even if USDT spec string is the same, USDT cookie value can be
+ * different. It was deemed excessive to try to deduplicate across independent
+ * USDT attachments by taking into account USDT spec string *and* USDT cookie
+ * value, which would complicate spec ID accounting significantly for little
+ * gain.
+ */
+
+#define USDT_BASE_SEC ".stapsdt.base"
+#define USDT_SEMA_SEC ".probes"
+#define USDT_NOTE_SEC ".note.stapsdt"
+#define USDT_NOTE_TYPE 3
+#define USDT_NOTE_NAME "stapsdt"
+
+/* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
+enum usdt_arg_type {
+ USDT_ARG_CONST,
+ USDT_ARG_REG,
+ USDT_ARG_REG_DEREF,
+};
+
+/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
+struct usdt_arg_spec {
+ __u64 val_off;
+ enum usdt_arg_type arg_type;
+ short reg_off;
+ bool arg_signed;
+ char arg_bitshift;
+};
+
+/* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
+#define USDT_MAX_ARG_CNT 12
+
+/* should match struct __bpf_usdt_spec from usdt.bpf.h */
+struct usdt_spec {
+ struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
+ __u64 usdt_cookie;
+ short arg_cnt;
+};
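As an illustration only (the exact encoding is inferred from the comments in usdt.bpf.h above, not quoted from this patch), the note argument "-4@-1204(%rbp)" from the readelf example earlier would be represented on x86-64 roughly as:

    struct usdt_arg_spec arg = {
            .arg_type     = USDT_ARG_REG_DEREF,
            .reg_off      = offsetof(struct pt_regs, rbp), /* where to read %rbp from */
            .val_off      = (__u64)-1204,                  /* dereference offset */
            .arg_signed   = true,                          /* "-4" means signed 4-byte */
            .arg_bitshift = 32,                            /* 64 - 4 * 8 bits to clear/sign-extend */
    };

At runtime, bpf_usdt_arg() reads %rbp from pt_regs, adds val_off, performs a user-space probe read, and finally shifts by arg_bitshift to truncate and sign-extend the 4-byte value into the returned 8-byte result.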
+
+struct usdt_note {
+ const char *provider;
+ const char *name;
+ /* USDT args specification string, e.g.:
+ * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
+ */
+ const char *args;
+ long loc_addr;
+ long base_addr;
+ long sema_addr;
+};
+
+struct usdt_target {
+ long abs_ip;
+ long rel_ip;
+ long sema_off;
+ struct usdt_spec spec;
+ const char *spec_str;
+};
+
+struct usdt_manager {
+ struct bpf_map *specs_map;
+ struct bpf_map *ip_to_spec_id_map;
+
+ int *free_spec_ids;
+ size_t free_spec_cnt;
+ size_t next_free_spec_id;
+
+ bool has_bpf_cookie;
+ bool has_sema_refcnt;
+};
+
+struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
+{
+ static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
+ struct usdt_manager *man;
+ struct bpf_map *specs_map, *ip_to_spec_id_map;
+
+ specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
+ ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
+ if (!specs_map || !ip_to_spec_id_map) {
+ pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
+ return ERR_PTR(-ESRCH);
+ }
+
+ man = calloc(1, sizeof(*man));
+ if (!man)
+ return ERR_PTR(-ENOMEM);
+
+ man->specs_map = specs_map;
+ man->ip_to_spec_id_map = ip_to_spec_id_map;
+
+ /* Detect if BPF cookie is supported for kprobes.
+ * We don't need IP-to-ID mapping if we can use BPF cookies.
+ * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
+ */
+ man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
+
+ /* Detect kernel support for automatic refcounting of USDT semaphore.
+ * If this is not supported, USDTs with semaphores will not be supported.
+ * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
+ */
+ man->has_sema_refcnt = access(ref_ctr_sysfs_path, F_OK) == 0;
+
+ return man;
+}
+
+void usdt_manager_free(struct usdt_manager *man)
+{
+ if (IS_ERR_OR_NULL(man))
+ return;
+
+ free(man->free_spec_ids);
+ free(man);
+}
+
+static int sanity_check_usdt_elf(Elf *elf, const char *path)
+{
+ GElf_Ehdr ehdr;
+ int endianness;
+
+ if (elf_kind(elf) != ELF_K_ELF) {
+ pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
+ return -EBADF;
+ }
+
+ switch (gelf_getclass(elf)) {
+ case ELFCLASS64:
+ if (sizeof(void *) != 8) {
+ pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
+ return -EBADF;
+ }
+ break;
+ case ELFCLASS32:
+ if (sizeof(void *) != 4) {
+ pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
+ return -EBADF;
+ }
+ break;
+ default:
+ pr_warn("usdt: unsupported ELF class for '%s'\n", path);
+ return -EBADF;
+ }
+
+ if (!gelf_getehdr(elf, &ehdr))
+ return -EINVAL;
+
+ if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
+ pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
+ path, ehdr.e_type);
+ return -EBADF;
+ }
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ endianness = ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ endianness = ELFDATA2MSB;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+ if (endianness != ehdr.e_ident[EI_DATA]) {
+ pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
+ return -EBADF;
+ }
+
+ return 0;
+}
+
+static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
+{
+ Elf_Scn *sec = NULL;
+ size_t shstrndx;
+
+ if (elf_getshdrstrndx(elf, &shstrndx))
+ return -EINVAL;
+
+ /* check if ELF is corrupted and avoid calling elf_strptr if yes */
+ if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
+ return -EINVAL;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *name;
+
+ if (!gelf_getshdr(sec, shdr))
+ return -EINVAL;
+
+ name = elf_strptr(elf, shstrndx, shdr->sh_name);
+ if (name && strcmp(sec_name, name) == 0) {
+ *scn = sec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+struct elf_seg {
+ long start;
+ long end;
+ long offset;
+ bool is_exec;
+};
+
+static int cmp_elf_segs(const void *_a, const void *_b)
+{
+ const struct elf_seg *a = _a;
+ const struct elf_seg *b = _b;
+
+ return a->start < b->start ? -1 : 1;
+}
+
+static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
+{
+ GElf_Phdr phdr;
+ size_t n;
+ int i, err;
+ struct elf_seg *seg;
+ void *tmp;
+
+ *seg_cnt = 0;
+
+ if (elf_getphdrnum(elf, &n)) {
+ err = -errno;
+ return err;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (!gelf_getphdr(elf, i, &phdr)) {
+ err = -errno;
+ return err;
+ }
+
+ pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
+ i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
+ (long)phdr.p_type, (long)phdr.p_flags);
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
+ if (!tmp)
+ return -ENOMEM;
+
+ *segs = tmp;
+ seg = *segs + *seg_cnt;
+ (*seg_cnt)++;
+
+ seg->start = phdr.p_vaddr;
+ seg->end = phdr.p_vaddr + phdr.p_memsz;
+ seg->offset = phdr.p_offset;
+ seg->is_exec = phdr.p_flags & PF_X;
+ }
+
+ if (*seg_cnt == 0) {
+ pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
+ return -ESRCH;
+ }
+
+ qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
+ return 0;
+}
+
+static int parse_lib_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
+{
+ char path[PATH_MAX], line[PATH_MAX], mode[16];
+ size_t seg_start, seg_end, seg_off;
+ struct elf_seg *seg;
+ int tmp_pid, i, err;
+ FILE *f;
+
+ *seg_cnt = 0;
+
+ /* Handle containerized binaries only accessible from
+ * /proc/<pid>/root/<path>. They will be reported as just /<path> in
+ * /proc/<pid>/maps.
+ */
+ if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
+ goto proceed;
+
+ if (!realpath(lib_path, path)) {
+ pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
+ lib_path, -errno);
+ libbpf_strlcpy(path, lib_path, sizeof(path));
+ }
+
+proceed:
+ sprintf(line, "/proc/%d/maps", pid);
+ f = fopen(line, "r");
+ if (!f) {
+ err = -errno;
+ pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
+ line, lib_path, err);
+ return err;
+ }
+
+ /* We need to handle lines with no path at the end:
+ *
+ * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613 /usr/lib64/libc-2.17.so
+ * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
+ * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598 /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
+ */
+ while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
+ &seg_start, &seg_end, mode, &seg_off, line) == 5) {
+ void *tmp;
+
+ /* to handle no path case (see above) we need to capture line
+ * without skipping any whitespaces. So we need to strip
+ * leading whitespaces manually here
+ */
+ i = 0;
+ while (isblank(line[i]))
+ i++;
+ if (strcmp(line + i, path) != 0)
+ continue;
+
+ pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
+ path, seg_start, seg_end, mode, seg_off);
+
+ /* ignore non-executable sections for shared libs */
+ if (mode[2] != 'x')
+ continue;
+
+ tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ *segs = tmp;
+ seg = *segs + *seg_cnt;
+ *seg_cnt += 1;
+
+ seg->start = seg_start;
+ seg->end = seg_end;
+ seg->offset = seg_off;
+ seg->is_exec = true;
+ }
+
+ if (*seg_cnt == 0) {
+ pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
+ lib_path, path, pid);
+ err = -ESRCH;
+ goto err_out;
+ }
+
+ qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
+ err = 0;
+err_out:
+ fclose(f);
+ return err;
+}
+
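+/* Find the segment covering a given address. For shared libraries
+ * (relative == true) the address is a file-offset-based relative IP and is
+ * matched against [offset, offset + segment size); for executables it is an
+ * absolute virtual address matched against [start, end). E.g. (illustrative
+ * numbers), a relative IP 0x1234 matches a lib segment with offset 0x1000
+ * and size 0x2000.
+ */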
+static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long addr, bool relative)
+{
+ struct elf_seg *seg;
+ int i;
+
+ if (relative) {
+ /* for shared libraries, the address is a relative offset and
+ * thus should fall within the logical offset-based range of
+ * [offset_start, offset_end)
+ */
+ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
+ if (seg->offset <= addr && addr < seg->offset + (seg->end - seg->start))
+ return seg;
+ }
+ } else {
+ /* for binaries, the address is absolute and thus should be within
+ * the absolute address range of [seg_start, seg_end)
+ */
+ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
+ if (seg->start <= addr && addr < seg->end)
+ return seg;
+ }
+ }
+
+ return NULL;
+}
+
+static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
+ GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
+ struct usdt_note *usdt_note);
+
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
+
+static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
+ const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
+ struct usdt_target **out_targets, size_t *out_target_cnt)
+{
+ size_t off, name_off, desc_off, seg_cnt = 0, lib_seg_cnt = 0, target_cnt = 0;
+ struct elf_seg *segs = NULL, *lib_segs = NULL;
+ struct usdt_target *targets = NULL, *target;
+ long base_addr = 0;
+ Elf_Scn *notes_scn, *base_scn;
+ GElf_Shdr base_shdr, notes_shdr;
+ GElf_Ehdr ehdr;
+ GElf_Nhdr nhdr;
+ Elf_Data *data;
+ int err;
+
+ *out_targets = NULL;
+ *out_target_cnt = 0;
+
+ err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
+ if (err) {
+ pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
+ return err;
+ }
+
+ if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
+ pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
+ return -EINVAL;
+ }
+
+ err = parse_elf_segs(elf, path, &segs, &seg_cnt);
+ if (err) {
+ pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
+ goto err_out;
+ }
+
+ /* .stapsdt.base ELF section is optional, but is used for prelink
+ * offset compensation (see a big comment further below)
+ */
+ if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
+ base_addr = base_shdr.sh_addr;
+
+ data = elf_getdata(notes_scn, 0);
+ off = 0;
+ while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
+ long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
+ struct usdt_note note;
+ struct elf_seg *seg = NULL;
+ void *tmp;
+
+ err = parse_usdt_note(elf, path, base_addr, &nhdr,
+ data->d_buf, name_off, desc_off, &note);
+ if (err)
+ goto err_out;
+
+ if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
+ continue;
+
+ /* We need to compensate "prelink effect". See [0] for details,
+ * relevant parts quoted here:
+ *
+ * Each SDT probe also expands into a non-allocated ELF note. You can
+ * find this by looking at SHT_NOTE sections and decoding the format;
+ * see below for details. Because the note is non-allocated, it means
+ * there is no runtime cost, and also preserved in both stripped files
+ * and .debug files.
+ *
+ * However, this means that prelink won't adjust the note's contents
+ * for address offsets. Instead, this is done via the .stapsdt.base
+ * section. This is a special section that is added to the text. We
+ * will only ever have one of these sections in a final link and it
+ * will only ever be one byte long. Nothing about this section itself
+ * matters, we just use it as a marker to detect prelink address
+ * adjustments.
+ *
+ * Each probe note records the link-time address of the .stapsdt.base
+ * section alongside the probe PC address. The decoder compares the
+ * base address stored in the note with the .stapsdt.base section's
+ * sh_addr. Initially these are the same, but the section header will
+ * be adjusted by prelink. So the decoder applies the difference to
+ * the probe PC address to get the correct prelinked PC address; the
+ * same adjustment is applied to the semaphore address, if any.
+ *
+ * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ */
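+ /* Illustrative example (made-up numbers): if the note recorded
+ * base_addr 0x1000 but .stapsdt.base now sits at sh_addr 0x3000,
+ * prelink shifted the binary by 0x2000, so the probe IP below gets
+ * the same +0x2000 adjustment.
+ */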
+ usdt_rel_ip = usdt_abs_ip = note.loc_addr;
+ if (base_addr) {
+ usdt_abs_ip += base_addr - note.base_addr;
+ usdt_rel_ip += base_addr - note.base_addr;
+ }
+
+ if (ehdr.e_type == ET_EXEC) {
+ /* When attaching uprobes (which is what USDTs basically
+ * are) the kernel expects a relative IP to be specified,
+ * so if we are attaching to an executable ELF binary
+ * (i.e., not a shared library), we need to calculate the
+ * proper relative IP based on the ELF's load address
+ */
+ seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip, false /* relative */);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
+ usdt_provider, usdt_name, path, usdt_abs_ip);
+ goto err_out;
+ }
+ if (!seg->is_exec) {
+ err = -ESRCH;
+ pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
+ path, seg->start, seg->end, usdt_provider, usdt_name,
+ usdt_abs_ip);
+ goto err_out;
+ }
+
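+ /* seg->start - seg->offset is the segment's load bias;
+ * subtracting it turns the absolute virtual address into the
+ * file offset that the uprobe attach API expects
+ */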
+ usdt_rel_ip = usdt_abs_ip - (seg->start - seg->offset);
+ } else if (!man->has_bpf_cookie) { /* ehdr.e_type == ET_DYN */
+ /* If we don't have BPF cookie support but need to
+ * attach to a shared library, we'll need to know and
+ * record absolute addresses of attach points due to
+ * the need to look up the USDT spec by the absolute IP
+ * of the triggered uprobe. Doing this resolution is
+ * only possible when we have a specific PID of the
+ * process that's using the specified shared library.
+ * BPF cookie removes the absolute address limitation
+ * as we don't need to do this lookup (we just use the
+ * BPF cookie as the index of the USDT spec), so for
+ * newer kernels with BPF cookie support libbpf supports
+ * USDT attachment to shared libraries with no PID filter.
+ */
+ if (pid < 0) {
+ pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
+ err = -ENOTSUP;
+ goto err_out;
+ }
+
+ /* lib_segs are lazily initialized only if necessary */
+ if (lib_seg_cnt == 0) {
+ err = parse_lib_segs(pid, path, &lib_segs, &lib_seg_cnt);
+ if (err) {
+ pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
+ pid, path, err);
+ goto err_out;
+ }
+ }
+
+ seg = find_elf_seg(lib_segs, lib_seg_cnt, usdt_rel_ip, true /* relative */);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
+ usdt_provider, usdt_name, path, usdt_rel_ip);
+ goto err_out;
+ }
+
+ usdt_abs_ip = seg->start + (usdt_rel_ip - seg->offset);
+ }
+
+ pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
+ usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
+ note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
+ seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
+
+ /* Adjust semaphore address to be a relative offset */
+ if (note.sema_addr) {
+ if (!man->has_sema_refcnt) {
+ pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
+ usdt_provider, usdt_name, path);
+ err = -ENOTSUP;
+ goto err_out;
+ }
+
+ seg = find_elf_seg(segs, seg_cnt, note.sema_addr, false /* relative */);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
+ usdt_provider, usdt_name, path, note.sema_addr);
+ goto err_out;
+ }
+ if (seg->is_exec) {
+ err = -ESRCH;
+ pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
+ path, seg->start, seg->end, usdt_provider, usdt_name,
+ note.sema_addr);
+ goto err_out;
+ }
+
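+ /* same conversion as for the probe IP: turn the semaphore's
+ * virtual address into a file offset, which is later passed
+ * as ref_ctr_offset when attaching the uprobe
+ */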
+ usdt_sema_off = note.sema_addr - (seg->start - seg->offset);
+
+ pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
+ usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
+ path, note.sema_addr, note.base_addr, usdt_sema_off,
+ seg->start, seg->end, seg->offset);
+ }
+
+ /* Record adjusted addresses and offsets and parse USDT spec */
+ tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ targets = tmp;
+
+ target = &targets[target_cnt];
+ memset(target, 0, sizeof(*target));
+
+ target->abs_ip = usdt_abs_ip;
+ target->rel_ip = usdt_rel_ip;
+ target->sema_off = usdt_sema_off;
+
+ /* note.args references strings from the ELF itself, so they can
+ * be referenced safely until the elf_end() call
+ */
+ target->spec_str = note.args;
+
+ err = parse_usdt_spec(&target->spec, &note, usdt_cookie);
+ if (err)
+ goto err_out;
+
+ target_cnt++;
+ }
+
+ *out_targets = targets;
+ *out_target_cnt = target_cnt;
+ err = target_cnt;
+
+err_out:
+ free(segs);
+ free(lib_segs);
+ if (err < 0)
+ free(targets);
+ return err;
+}
+
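+/* A USDT attachment is represented as a bpf_link that owns one underlying
+ * uprobe link per discovered probe location, plus the list of spec IDs it
+ * allocated so they can be returned to usdt_manager on detach.
+ */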
+struct bpf_link_usdt {
+ struct bpf_link link;
+
+ struct usdt_manager *usdt_man;
+
+ size_t spec_cnt;
+ int *spec_ids;
+
+ size_t uprobe_cnt;
+ struct {
+ long abs_ip;
+ struct bpf_link *link;
+ } *uprobes;
+};
+
+static int bpf_link_usdt_detach(struct bpf_link *link)
+{
+ struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
+ struct usdt_manager *man = usdt_link->usdt_man;
+ int i;
+
+ for (i = 0; i < usdt_link->uprobe_cnt; i++) {
+ /* detach underlying uprobe link */
+ bpf_link__destroy(usdt_link->uprobes[i].link);
+ /* there is no need to update the specs map because it will be
+ * unconditionally overwritten on subsequent USDT attaches,
+ * but if BPF cookies are not used we need to remove the entry
+ * from the ip_to_spec_id map, otherwise we'll run into false
+ * conflicting IP errors
+ */
+ if (!man->has_bpf_cookie) {
+ /* not much we can do about errors here */
+ (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
+ &usdt_link->uprobes[i].abs_ip);
+ }
+ }
+
+ /* try to return the list of previously used spec IDs to usdt_manager
+ * for reuse by subsequent USDT attaches
+ */
+ if (!man->free_spec_ids) {
+ /* if there were no free spec IDs yet, just transfer our IDs */
+ man->free_spec_ids = usdt_link->spec_ids;
+ man->free_spec_cnt = usdt_link->spec_cnt;
+ usdt_link->spec_ids = NULL;
+ } else {
+ /* otherwise concat IDs */
+ size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
+ int *new_free_ids;
+
+ new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
+ sizeof(*new_free_ids));
+ /* If we couldn't resize free_spec_ids, we'll just leak
+ * a bunch of free IDs; this is very unlikely to happen and if
+ * the system is that short on memory, it's probably the least
+ * of the user's concerns.
+ * So just do our best here to return those IDs to usdt_manager.
+ */
+ if (new_free_ids) {
+ memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
+ usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
+ man->free_spec_ids = new_free_ids;
+ man->free_spec_cnt = new_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void bpf_link_usdt_dealloc(struct bpf_link *link)
+{
+ struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
+
+ free(usdt_link->spec_ids);
+ free(usdt_link->uprobes);
+ free(usdt_link);
+}
+
+static size_t specs_hash_fn(const void *key, void *ctx)
+{
+ const char *s = key;
+
+ return str_hash(s);
+}
+
+static bool specs_equal_fn(const void *key1, const void *key2, void *ctx)
+{
+ const char *s1 = key1;
+ const char *s2 = key2;
+
+ return strcmp(s1, s2) == 0;
+}
+
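+/* Resolve a spec ID for the target's arg spec string: reuse the cached ID
+ * for an identical spec string, otherwise take one from the manager's free
+ * list or, if that is empty, allocate the next ID, bounded by the specs
+ * map's max_entries.
+ */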
+static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
+ struct bpf_link_usdt *link, struct usdt_target *target,
+ int *spec_id, bool *is_new)
+{
+ void *tmp;
+ int err;
+
+ /* check if we already allocated spec ID for this spec string */
+ if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
+ *spec_id = (long)tmp;
+ *is_new = false;
+ return 0;
+ }
+
+ /* otherwise it's a new ID that needs to be set up in the specs map
+ * and returned to usdt_manager when the USDT link is detached
+ */
+ tmp = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
+ if (!tmp)
+ return -ENOMEM;
+ link->spec_ids = tmp;
+
+ /* get next free spec ID, giving preference to free list, if not empty */
+ if (man->free_spec_cnt) {
+ *spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
+
+ /* cache spec ID for current spec string for future lookups */
+ err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ if (err)
+ return err;
+
+ man->free_spec_cnt--;
+ } else {
+ /* don't allocate spec ID bigger than what fits in specs map */
+ if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
+ return -E2BIG;
+
+ *spec_id = man->next_free_spec_id;
+
+ /* cache spec ID for current spec string for future lookups */
+ err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ if (err)
+ return err;
+
+ man->next_free_spec_id++;
+ }
+
+ /* remember the new spec ID in the link so it can be returned to the free list on detach */
+ link->spec_ids[link->spec_cnt] = *spec_id;
+ link->spec_cnt++;
+ *is_new = true;
+ return 0;
+}
+
+struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
+ pid_t pid, const char *path,
+ const char *usdt_provider, const char *usdt_name,
+ __u64 usdt_cookie)
+{
+ int i, fd, err, spec_map_fd, ip_map_fd;
+ LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ struct hashmap *specs_hash = NULL;
+ struct bpf_link_usdt *link = NULL;
+ struct usdt_target *targets = NULL;
+ size_t target_cnt;
+ Elf *elf;
+
+ spec_map_fd = bpf_map__fd(man->specs_map);
+ ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
+
+ /* TODO: perform path resolution similar to uprobe's */
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ err = -errno;
+ pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
+ return libbpf_err_ptr(err);
+ }
+
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ err = -EBADF;
+ pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
+ goto err_out;
+ }
+
+ err = sanity_check_usdt_elf(elf, path);
+ if (err)
+ goto err_out;
+
+ /* normalize PID filter */
+ if (pid < 0)
+ pid = -1;
+ else if (pid == 0)
+ pid = getpid();
+
+ /* discover USDTs in the given binary, optionally limiting
+ * activations to a given PID, if pid > 0
+ */
+ err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
+ usdt_cookie, &targets, &target_cnt);
+ if (err <= 0) {
+ err = (err == 0) ? -ENOENT : err;
+ goto err_out;
+ }
+
+ specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
+ if (IS_ERR(specs_hash)) {
+ err = PTR_ERR(specs_hash);
+ goto err_out;
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ link->usdt_man = man;
+ link->link.detach = &bpf_link_usdt_detach;
+ link->link.dealloc = &bpf_link_usdt_dealloc;
+
+ link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
+ if (!link->uprobes) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < target_cnt; i++) {
+ struct usdt_target *target = &targets[i];
+ struct bpf_link *uprobe_link;
+ bool is_new;
+ int spec_id;
+
+ /* A spec ID can be either reused or newly allocated. If it is
+ * newly allocated, we'll need to fill out the spec map entry,
+ * otherwise the entire spec is already valid and can just be
+ * used by the new uprobe. We reuse a spec when its USDT arg
+ * spec string is identical. We also never share specs between
+ * two different USDT attachments ("links"), so all the reused
+ * specs already share the USDT cookie value implicitly.
+ */
+ err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
+ if (err)
+ goto err_out;
+
+ if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
+ err = -errno;
+ pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
+ spec_id, usdt_provider, usdt_name, path, err);
+ goto err_out;
+ }
+ if (!man->has_bpf_cookie &&
+ bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
+ err = -errno;
+ if (err == -EEXIST) {
+ pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
+ spec_id, usdt_provider, usdt_name, path);
+ } else {
+ pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
+ target->abs_ip, spec_id, usdt_provider, usdt_name,
+ path, err);
+ }
+ goto err_out;
+ }
+
+ opts.ref_ctr_offset = target->sema_off;
+ opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
+ uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
+ target->rel_ip, &opts);
+ err = libbpf_get_error(uprobe_link);
+ if (err) {
+ pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
+ i, usdt_provider, usdt_name, path, err);
+ goto err_out;
+ }
+
+ link->uprobes[i].link = uprobe_link;
+ link->uprobes[i].abs_ip = target->abs_ip;
+ link->uprobe_cnt++;
+ }
+
+ free(targets);
+ hashmap__free(specs_hash);
+ elf_end(elf);
+ close(fd);
+
+ return &link->link;
+
+err_out:
+ if (link)
+ bpf_link__destroy(&link->link);
+ free(targets);
+ hashmap__free(specs_hash);
+ if (elf)
+ elf_end(elf);
+ close(fd);
+ return libbpf_err_ptr(err);
+}
+
+/* Parse out USDT ELF note from '.note.stapsdt' section.
+ * Logic inspired by perf's code.
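+ * The note's "desc" payload is three address-sized values (probe location,
+ * link-time .stapsdt.base address, semaphore address) followed by three
+ * zero-terminated strings: provider, name, and the argument spec.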
+ */
+static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
+ GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
+ struct usdt_note *note)
+{
+ const char *provider, *name, *args;
+ long addrs[3];
+ size_t len;
+
+ /* sanity check USDT note name and type first */
+ if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
+ return -EINVAL;
+ if (nhdr->n_type != USDT_NOTE_TYPE)
+ return -EINVAL;
+
+ /* sanity check USDT note contents ("description" in ELF terminology) */
+ len = nhdr->n_descsz;
+ data = data + desc_off;
+
+ /* +3 is the very minimum required to store three empty strings */
+ if (len < sizeof(addrs) + 3)
+ return -EINVAL;
+
+ /* get location, base, and semaphore addrs */
+ memcpy(&addrs, data, sizeof(addrs));
+
+ /* parse string fields: provider, name, args */
+ provider = data + sizeof(addrs);
+
+ name = (const char *)memchr(provider, '\0', data + len - provider);
+ if (!name) /* non-zero-terminated provider */
+ return -EINVAL;
+ name++;
+ if (name >= data + len || *name == '\0') /* missing or empty name */
+ return -EINVAL;
+
+ args = memchr(name, '\0', data + len - name);
+ if (!args) /* non-zero-terminated name */
+ return -EINVAL;
+ ++args;
+ if (args >= data + len) /* missing arguments spec */
+ return -EINVAL;
+
+ note->provider = provider;
+ note->name = name;
+ if (*args == '\0' || *args == ':')
+ note->args = "";
+ else
+ note->args = args;
+ note->loc_addr = addrs[0];
+ note->base_addr = addrs[1];
+ note->sema_addr = addrs[2];
+
+ return 0;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg);
+
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
+{
+ const char *s;
+ int len;
+
+ spec->usdt_cookie = usdt_cookie;
+ spec->arg_cnt = 0;
+
+ s = note->args;
+ while (s[0]) {
+ if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
+ pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
+ USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
+ return -E2BIG;
+ }
+
+ len = parse_usdt_arg(s, spec->arg_cnt, &spec->args[spec->arg_cnt]);
+ if (len < 0)
+ return len;
+
+ s += len;
+ spec->arg_cnt++;
+ }
+
+ return 0;
+}
+
+/* Architecture-specific logic for parsing USDT argument location specs */
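+/* The argument spec string is a whitespace-separated list of
+ * "<size>@<location>" tokens emitted by the compiler, e.g. on x86-64
+ * (illustrative): "-4@-20(%rbp) -4@%eax 4@$71" - a dereference of memory at
+ * %rbp-20, a read of register %eax, and the constant 71, respectively.
+ */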
+
+#if defined(__x86_64__) || defined(__i386__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *names[4];
+ size_t pt_regs_off;
+ } reg_map[] = {
+#ifdef __x86_64__
+#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
+#else
+#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
+#endif
+ { {"rip", "eip", "", ""}, reg_off(rip, eip) },
+ { {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
+ { {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
+ { {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
+ { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
+ { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
+ { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
+ { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
+ { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
+#undef reg_off
+#ifdef __x86_64__
+ { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
+ { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
+ { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
+ { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
+ { {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
+ { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
+ { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
+ { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
+#endif
+ };
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) {
+ if (strcmp(reg_name, reg_map[i].names[j]) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%%m[^)] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
+ /* Memory dereference case, e.g., -4@-20(%rbp) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %%%ms %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -4@%eax */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ $%ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@$71 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
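+ /* arg_bitshift records how far a value of this size has to be
+ * shifted left and then shifted back right so that the BPF-side
+ * reader can truncate it to arg_sz bytes and sign- or zero-extend
+ * it to 64 bits
+ */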
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__s390x__)
+
+/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ unsigned int reg;
+ int arg_sz, len;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", &arg_sz, &off, &reg, &len) == 3) {
+ /* Memory dereference case, e.g., -2@-28(%r15) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ if (reg > 15) {
+ pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
+ return -EINVAL;
+ }
+ arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
+ } else if (sscanf(arg_str, " %d @ %%r%u %n", &arg_sz, &reg, &len) == 2) {
+ /* Register read case, e.g., -8@%r0 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ if (reg > 15) {
+ pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
+ return -EINVAL;
+ }
+ arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@71 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__aarch64__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ int reg_num;
+
+ if (sscanf(reg_name, "x%d", &reg_num) == 1) {
+ if (reg_num >= 0 && reg_num < 31)
+ return offsetof(struct user_pt_regs, regs[reg_num]);
+ } else if (strcmp(reg_name, "sp") == 0) {
+ return offsetof(struct user_pt_regs, sp);
+ }
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, &reg_name, &off, &len) == 3) {
+ /* Memory dereference case, e.g., -4@[sp, 96] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Memory dereference case, e.g., -4@[sp] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@x4 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__riscv)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *name;
+ size_t pt_regs_off;
+ } reg_map[] = {
+ { "ra", offsetof(struct user_regs_struct, ra) },
+ { "sp", offsetof(struct user_regs_struct, sp) },
+ { "gp", offsetof(struct user_regs_struct, gp) },
+ { "tp", offsetof(struct user_regs_struct, tp) },
+ { "a0", offsetof(struct user_regs_struct, a0) },
+ { "a1", offsetof(struct user_regs_struct, a1) },
+ { "a2", offsetof(struct user_regs_struct, a2) },
+ { "a3", offsetof(struct user_regs_struct, a3) },
+ { "a4", offsetof(struct user_regs_struct, a4) },
+ { "a5", offsetof(struct user_regs_struct, a5) },
+ { "a6", offsetof(struct user_regs_struct, a6) },
+ { "a7", offsetof(struct user_regs_struct, a7) },
+ { "s0", offsetof(struct user_regs_struct, s0) },
+ { "s1", offsetof(struct user_regs_struct, s1) },
+ { "s2", offsetof(struct user_regs_struct, s2) },
+ { "s3", offsetof(struct user_regs_struct, s3) },
+ { "s4", offsetof(struct user_regs_struct, s4) },
+ { "s5", offsetof(struct user_regs_struct, s5) },
+ { "s6", offsetof(struct user_regs_struct, s6) },
+ { "s7", offsetof(struct user_regs_struct, s7) },
+ { "s8", offsetof(struct user_regs_struct, rv_s8) },
+ { "s9", offsetof(struct user_regs_struct, s9) },
+ { "s10", offsetof(struct user_regs_struct, s10) },
+ { "s11", offsetof(struct user_regs_struct, s11) },
+ { "t0", offsetof(struct user_regs_struct, t0) },
+ { "t1", offsetof(struct user_regs_struct, t1) },
+ { "t2", offsetof(struct user_regs_struct, t2) },
+ { "t3", offsetof(struct user_regs_struct, t3) },
+ { "t4", offsetof(struct user_regs_struct, t4) },
+ { "t5", offsetof(struct user_regs_struct, t5) },
+ { "t6", offsetof(struct user_regs_struct, t6) },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ if (strcmp(reg_name, reg_map[i].name) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %m[a-z0-9] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
+ /* Memory dereference case, e.g., -8@-88(s0) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@a1 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#else
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
+ return -ENOTSUP;
+}
+
+#endif
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 3820608faf57..bafdc5373a13 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -168,9 +168,15 @@ $(OUTPUT)/%:%.c
$(call msg,BINARY,,$@)
$(Q)$(LINK.c) $^ $(LDLIBS) -o $@
-$(OUTPUT)/urandom_read: urandom_read.c
+$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
+ $(call msg,LIB,,$@)
+ $(Q)$(CC) $(CFLAGS) -fPIC $(LDFLAGS) $^ $(LDLIBS) --shared -o $@
+
+$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
$(call msg,BINARY,,$@)
- $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $< $(LDLIBS) -Wl,--build-id=sha1 -o $@
+ $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.c,$^) \
+ liburandom_read.so $(LDLIBS) \
+ -Wl,-rpath=. -Wl,--build-id=sha1 -o $@
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
$(call msg,MOD,,$@)
@@ -328,12 +334,8 @@ SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h \
- test_subskeleton.skel.h test_subskeleton_lib.skel.h
-
-# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
-# but that's created as a side-effect of the skel.h generation.
-test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
-test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
+ test_subskeleton.skel.h test_subskeleton_lib.skel.h \
+ test_usdt.skel.h
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
@@ -346,6 +348,11 @@ test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
linked_funcs.skel.h-deps := linked_funcs1.o linked_funcs2.o
linked_vars.skel.h-deps := linked_vars1.o linked_vars2.o
linked_maps.skel.h-deps := linked_maps1.o linked_maps2.o
+# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
+# but that's created as a side-effect of the skel.h generation.
+test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
+test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
+test_usdt.skel.h-deps := test_usdt.o test_usdt_multispec.o
LINKED_BPF_SRCS := $(patsubst %.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
@@ -400,6 +407,7 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
$(wildcard $(BPFDIR)/bpf_*.h) \
+ $(wildcard $(BPFDIR)/*.bpf.h) \
| $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS))
@@ -491,6 +499,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
btf_helpers.c flow_dissector_load.h \
cap_helpers.c
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
+ $(OUTPUT)/liburandom_read.so \
ima_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index f973320e6dbf..f061cc20e776 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -8,7 +8,6 @@
#include <fcntl.h>
#include <pthread.h>
#include <sys/sysinfo.h>
-#include <sys/resource.h>
#include <signal.h>
#include "bench.h"
#include "testing_helpers.h"
diff --git a/tools/testing/selftests/bpf/bpf_rlimit.h b/tools/testing/selftests/bpf/bpf_rlimit.h
deleted file mode 100644
index 9dac9b30f8ef..000000000000
--- a/tools/testing/selftests/bpf/bpf_rlimit.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <sys/resource.h>
-#include <stdio.h>
-
-static __attribute__((constructor)) void bpf_rlimit_ctor(void)
-{
- struct rlimit rlim_old, rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- getrlimit(RLIMIT_MEMLOCK, &rlim_old);
- /* For the sake of running the test cases, we temporarily
- * set rlimit to infinity in order for kernel to focus on
- * errors from actual test cases and not getting noise
- * from hitting memlock limits. The limit is on per-process
- * basis and not a global one, hence destructor not really
- * needed here.
- */
- if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) {
- perror("Unable to lift memlock rlimit");
- /* Trying out lower limit, but expect potential test
- * case failures from this!
- */
- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
- }
-}
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index 87fd1aa323a9..c8be6406777f 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -11,7 +11,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
@@ -25,9 +24,8 @@ static void load_and_attach_program(void)
int prog_fd, ret;
struct bpf_object *obj;
- ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
- if (ret)
- error(1, 0, "failed to enable libbpf strict mode: %d", ret);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
ret = bpf_flow_load(&obj, cfg_path_name, cfg_prog_name,
cfg_map_name, NULL, &prog_fd, NULL);
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c
index 3a7b82bd9e94..e021cc67dc02 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c
@@ -20,7 +20,6 @@
#include "cgroup_helpers.h"
#include "testing_helpers.h"
-#include "bpf_rlimit.h"
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
@@ -67,6 +66,9 @@ int main(int argc, char **argv)
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
new file mode 100644
index 000000000000..b17bfa0e0aac
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void init_test_filter_set(struct test_filter_set *set)
+{
+ set->cnt = 0;
+ set->tests = NULL;
+}
+
+static void free_test_filter_set(struct test_filter_set *set)
+{
+ int i, j;
+
+ for (i = 0; i < set->cnt; i++) {
+ for (j = 0; j < set->tests[i].subtest_cnt; j++)
+ free((void *)set->tests[i].subtests[j]);
+ free(set->tests[i].subtests);
+ free(set->tests[i].name);
+ }
+
+ free(set->tests);
+ init_test_filter_set(set);
+}
+
+static void test_parse_test_list(void)
+{
+ struct test_filter_set set;
+
+ init_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "test filters count"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie",
+ &set,
+ true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true),
+ "parsing");
+ ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing");
+ ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 3, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+error:
+ free_test_filter_set(&set);
+}
+
+void test_arg_parsing(void)
+{
+ if (test__start_subtest("test_parse_test_list"))
+ test_parse_test_list();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index d48f6e533e1e..c0c6d410751d 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -11,15 +11,22 @@ static void trigger_func(void)
asm volatile ("");
}
+/* attach point for byname uprobe */
+static void trigger_func2(void)
+{
+ asm volatile ("");
+}
+
void test_attach_probe(void)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
- int duration = 0;
struct bpf_link *kprobe_link, *kretprobe_link;
struct bpf_link *uprobe_link, *uretprobe_link;
struct test_attach_probe* skel;
ssize_t uprobe_offset, ref_ctr_offset;
+ struct bpf_link *uprobe_err_link;
bool legacy;
+ char *mem;
/* Check if new-style kprobe/uprobe API is supported.
* Kernels that support new FD-based kprobe and uprobe BPF attachment
@@ -43,9 +50,9 @@ void test_attach_probe(void)
return;
skel = test_attach_probe__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
- if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n"))
+ if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
goto cleanup;
kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
@@ -90,25 +97,73 @@ void test_attach_probe(void)
goto cleanup;
skel->links.handle_uretprobe = uretprobe_link;
- /* trigger & validate kprobe && kretprobe */
- usleep(1);
+ /* verify auto-attach fails for old-style uprobe definition */
+ uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
+ if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
+ "auto-attach should fail for old-style name"))
+ goto cleanup;
+
+ uprobe_opts.func_name = "trigger_func2";
+ uprobe_opts.retprobe = false;
+ uprobe_opts.ref_ctr_offset = 0;
+ skel->links.handle_uprobe_byname =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
+ 0 /* this pid */,
+ "/proc/self/exe",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
+ goto cleanup;
+
+ /* verify auto-attach works */
+ skel->links.handle_uretprobe_byname =
+ bpf_program__attach(skel->progs.handle_uretprobe_byname);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
+ goto cleanup;
- if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res",
- "wrong kprobe res: %d\n", skel->bss->kprobe_res))
+ /* test attach by name for a library function, using the library
+ * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
+ */
+ uprobe_opts.func_name = "malloc";
+ uprobe_opts.retprobe = false;
+ skel->links.handle_uprobe_byname2 =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
+ 0 /* this pid */,
+ "libc.so.6",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
goto cleanup;
- if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res",
- "wrong kretprobe res: %d\n", skel->bss->kretprobe_res))
+
+ uprobe_opts.func_name = "free";
+ uprobe_opts.retprobe = true;
+ skel->links.handle_uretprobe_byname2 =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
+ -1 /* any pid */,
+ "libc.so.6",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
goto cleanup;
+ /* trigger & validate kprobe && kretprobe */
+ usleep(1);
+
+ /* trigger & validate shared library u[ret]probes attached by name */
+ mem = malloc(1);
+ free(mem);
+
/* trigger & validate uprobe & uretprobe */
trigger_func();
- if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",
- "wrong uprobe res: %d\n", skel->bss->uprobe_res))
- goto cleanup;
- if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res",
- "wrong uretprobe res: %d\n", skel->bss->uretprobe_res))
- goto cleanup;
+ /* trigger & validate uprobe attached by name */
+ trigger_func2();
+
+ ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+ ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+ ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
+ ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
+ ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
+ ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
cleanup:
test_attach_probe__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 5142a7d130b2..2c403ddc8076 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -1192,8 +1192,6 @@ static void str_strip_first_line(char *str)
*dst = '\0';
}
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
static void test_task_vma(void)
{
int err, iter_fd = -1, proc_maps_fd = -1;
@@ -1229,7 +1227,7 @@ static void test_task_vma(void)
len = 0;
while (len < CMP_BUFFER_SIZE) {
err = read_fd_into_buffer(iter_fd, task_vma_output + len,
- min(read_size, CMP_BUFFER_SIZE - len));
+ MIN(read_size, CMP_BUFFER_SIZE - len));
if (!err)
break;
if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n"))
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
index d43f548c572c..a4d0cc9d3367 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
@@ -36,13 +36,13 @@ struct test_config {
void (*bpf_destroy)(void *);
};
-enum test_state {
+enum bpf_test_state {
_TS_INVALID,
TS_MODULE_LOAD,
TS_MODULE_LOAD_FAIL,
};
-static _Atomic enum test_state state = _TS_INVALID;
+static _Atomic enum bpf_test_state state = _TS_INVALID;
static int sys_finit_module(int fd, const char *param_values, int flags)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 8f7a1cef7d87..e9a9a31b2ffe 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -10,8 +10,6 @@
#include "bpf_tcp_nogpl.skel.h"
#include "bpf_dctcp_release.skel.h"
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
@@ -53,7 +51,7 @@ static void *server(void *arg)
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_sent = send(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
+ MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_sent == -1 && errno == EINTR)
continue;
if (nr_sent == -1) {
@@ -146,7 +144,7 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
/* recv total_bytes */
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_recv = recv(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
+ MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_recv == -1 && errno == EINTR)
continue;
if (nr_recv == -1)
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index ec823561b912..ba5bde53d418 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -8,7 +8,6 @@
#include <linux/filter.h>
#include <linux/unistd.h>
#include <bpf/bpf.h>
-#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <string.h>
@@ -3974,6 +3973,105 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 1,
.max_entries = 1,
},
+{
+ .descr = "type_tag test #2, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_CONST_ENC(3), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #3, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #4, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #5, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(1), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "type_tag test #6, type tag order",
+ .raw_types = {
+ BTF_PTR_ENC(2), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */
+ BTF_PTR_ENC(6), /* [5] */
+ BTF_CONST_ENC(2), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
}; /* struct btf_raw_test raw_tests[] */
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 3ee2107bbf7a..fe1f0f26ea14 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -53,7 +53,7 @@ void test_fexit_stress(void)
&trace_opts);
if (!ASSERT_GE(fexit_fd[i], 0, "fexit load"))
goto out;
- link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]);
+ link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL);
if (!ASSERT_GE(link_fd[i], 0, "fexit attach"))
goto out;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c
index 044df13ee069..754e80937e5d 100644
--- a/tools/testing/selftests/bpf/prog_tests/for_each.c
+++ b/tools/testing/selftests/bpf/prog_tests/for_each.c
@@ -4,6 +4,7 @@
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
+#include "for_each_map_elem_write_key.skel.h"
static unsigned int duration;
@@ -129,10 +130,21 @@ out:
for_each_array_map_elem__destroy(skel);
}
+static void test_write_map_key(void)
+{
+ struct for_each_map_elem_write_key *skel;
+
+ skel = for_each_map_elem_write_key__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
+ for_each_map_elem_write_key__destroy(skel);
+}
+
void test_for_each(void)
{
if (test__start_subtest("hash_map"))
test_hash_map();
if (test__start_subtest("array_map"))
test_array_map();
+ if (test__start_subtest("write_map_key"))
+ test_write_map_key();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
index e1de5f80c3b2..0354f9b82c65 100644
--- a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
+++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
@@ -6,11 +6,10 @@
void test_helper_restricted(void)
{
int prog_i = 0, prog_cnt;
- int duration = 0;
do {
struct test_helper_restricted *test;
- int maybeOK;
+ int err;
test = test_helper_restricted__open();
if (!ASSERT_OK_PTR(test, "open"))
@@ -21,12 +20,11 @@ void test_helper_restricted(void)
for (int j = 0; j < prog_cnt; ++j) {
struct bpf_program *prog = *test->skeleton->progs[j].prog;
- maybeOK = bpf_program__set_autoload(prog, prog_i == j);
- ASSERT_OK(maybeOK, "set autoload");
+ bpf_program__set_autoload(prog, true);
}
- maybeOK = test_helper_restricted__load(test);
- CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted");
+ err = test_helper_restricted__load(test);
+ ASSERT_ERR(err, "load_should_fail");
test_helper_restricted__destroy(test);
} while (++prog_i < prog_cnt);
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
index f6933b06daf8..1d7a2f1e0731 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
@@ -138,12 +138,16 @@ cleanup:
test_ksyms_weak_lskel__destroy(skel);
}
-static void test_write_check(void)
+static void test_write_check(bool test_handler1)
{
struct test_ksyms_btf_write_check *skel;
- skel = test_ksyms_btf_write_check__open_and_load();
- ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n");
+ skel = test_ksyms_btf_write_check__open();
+ if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open"))
+ return;
+ bpf_program__set_autoload(test_handler1 ? skel->progs.handler2 : skel->progs.handler1, false);
+ ASSERT_ERR(test_ksyms_btf_write_check__load(skel),
+ "unexpected load of a prog writing to ksym memory\n");
test_ksyms_btf_write_check__destroy(skel);
}
@@ -179,6 +183,9 @@ void test_ksyms_btf(void)
if (test__start_subtest("weak_ksyms_lskel"))
test_weak_syms_lskel();
- if (test__start_subtest("write_check"))
- test_write_check();
+ if (test__start_subtest("write_check1"))
+ test_write_check(true);
+
+ if (test__start_subtest("write_check2"))
+ test_write_check(false);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
index e9916f2817ec..cad664546912 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
@@ -14,6 +14,12 @@ void test_linked_funcs(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
+ /* handler1 and handler2 are marked as SEC("?raw_tp/sys_enter") and
+ * are set to not autoload by default
+ */
+ bpf_program__set_autoload(skel->progs.handler1, true);
+ bpf_program__set_autoload(skel->progs.handler2, true);
+
skel->rodata->my_tid = syscall(SYS_gettid);
skel->bss->syscall_id = SYS_getpgid;
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
new file mode 100644
index 000000000000..be3a956cb3a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_fixup.skel.h"
+
+enum trunc_type {
+ TRUNC_NONE,
+ TRUNC_PARTIAL,
+ TRUNC_FULL,
+};
+
+static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo, true);
+ memset(log_buf, 0, sizeof(log_buf));
+ bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ "0: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_sz> ",
+ "log_buf_part1");
+
+ switch (trunc_type) {
+ case TRUNC_NONE:
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
+ "log_buf_end");
+ break;
+ case TRUNC_PARTIAL:
+ /* we should get full libbpf message patch */
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+ /* we shouldn't get full end of BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ case TRUNC_FULL:
+ /* we shouldn't get second part of libbpf message patch */
+ ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"),
+ "log_buf_part2");
+ /* we shouldn't get full end of BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ }
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+static void bad_core_relo_subprog(void)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo_subprog, true);
+ bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ /* there should be no prog loading log because we specified per-prog log buf */
+ ASSERT_HAS_SUBSTR(log_buf,
+ ": <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_off> ",
+ "log_buf");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n",
+ "log_buf");
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+void test_log_fixup(void)
+{
+ if (test__start_subtest("bad_core_relo_trunc_none"))
+ bad_core_relo(0, TRUNC_NONE /* full buf */);
+ if (test__start_subtest("bad_core_relo_trunc_partial"))
+ bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
+ if (test__start_subtest("bad_core_relo_trunc_full"))
+ bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */);
+ if (test__start_subtest("bad_core_relo_subprog"))
+ bad_core_relo_subprog();
+}
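
For readers unfamiliar with per-program log buffers, a minimal sketch (hypothetical my_prog.skel.h skeleton with a program named handler) of routing one program's verifier log into a caller-owned buffer, as the test above does:

	#include <stdio.h>
	#include <bpf/libbpf.h>
	#include "my_prog.skel.h"            /* hypothetical skeleton */

	static void dump_verifier_log(void)
	{
		static char log_buf[16 * 1024];
		struct my_prog *skel;

		skel = my_prog__open();
		if (!skel)
			return;

		/* verifier output for this program lands in log_buf on failure */
		bpf_program__set_log_buf(skel->progs.handler, log_buf, sizeof(log_buf));
		if (my_prog__load(skel))
			fprintf(stderr, "verifier log:\n%s\n", log_buf);

		my_prog__destroy(skel);
	}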
diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
new file mode 100644
index 000000000000..9e2fbda64a65
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "map_kptr.skel.h"
+
+void test_map_kptr(void)
+{
+ struct map_kptr *skel;
+ int key = 0, ret;
+ char buf[24];
+
+ skel = map_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
+ return;
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+ ASSERT_OK(ret, "array_map update");
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+ ASSERT_OK(ret, "array_map update2");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "hash_map update");
+ ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_map), &key);
+ ASSERT_OK(ret, "hash_map delete");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key, buf, 0);
+ ASSERT_OK(ret, "hash_malloc_map update");
+ ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key);
+ ASSERT_OK(ret, "hash_malloc_map delete");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.lru_hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "lru_hash_map update");
+ ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.lru_hash_map), &key);
+ ASSERT_OK(ret, "lru_hash_map delete");
+
+ map_kptr__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c
index 954964f0ac3d..d3915c58d0e1 100644
--- a/tools/testing/selftests/bpf/prog_tests/netcnt.c
+++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c
@@ -25,7 +25,7 @@ void serial_test_netcnt(void)
if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load"))
return;
- nproc = get_nprocs_conf();
+ nproc = bpf_num_possible_cpus();
percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)"))
goto err;
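
The switch to bpf_num_possible_cpus() matters because per-CPU map lookups return one value slot per possible CPU; a minimal sketch (hypothetical map fd, using the equivalent public libbpf helper) of sizing the user-space buffer accordingly:

	#include <errno.h>
	#include <stdlib.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	static long long sum_percpu_value(int map_fd, __u32 key)
	{
		int i, nr_cpus = libbpf_num_possible_cpus();
		long long sum = 0;
		__u64 *vals;

		if (nr_cpus < 0)
			return nr_cpus;

		/* one slot per *possible* CPU, not per configured/online CPU */
		vals = calloc(nr_cpus, sizeof(*vals));
		if (!vals)
			return -ENOMEM;

		if (!bpf_map_lookup_elem(map_fd, &key, vals))
			for (i = 0; i < nr_cpus; i++)
				sum += vals[i];

		free(vals);
		return sum;
	}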
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
new file mode 100644
index 000000000000..14f2796076e0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void clear_test_state(struct test_state *state)
+{
+ state->error_cnt = 0;
+ state->sub_succ_cnt = 0;
+ state->skip_cnt = 0;
+}
+
+void test_prog_tests_framework(void)
+{
+ struct test_state *state = env.test_state;
+
+ /* in all the ASSERT calls below we need to return on the first
+ * error because we clear the test state after each dummy subtest
+ */
+
+ /* test we properly count skipped tests with subtests */
+ if (test__start_subtest("test_good_subtest"))
+ test__end_subtest();
+ if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_fail_subtest")) {
+ test__fail();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 873323fb18ba..739d2ea6ca55 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -1,21 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
-static void toggle_object_autoload_progs(const struct bpf_object *obj,
- const char *name_load)
-{
- struct bpf_program *prog;
-
- bpf_object__for_each_program(prog, obj) {
- const char *name = bpf_program__name(prog);
-
- if (!strcmp(name_load, name))
- bpf_program__set_autoload(prog, true);
- else
- bpf_program__set_autoload(prog, false);
- }
-}
-
void test_reference_tracking(void)
{
const char *file = "test_sk_lookup_kern.o";
@@ -39,6 +24,7 @@ void test_reference_tracking(void)
goto cleanup;
bpf_object__for_each_program(prog, obj_iter) {
+ struct bpf_program *p;
const char *name;
name = bpf_program__name(prog);
@@ -49,7 +35,12 @@ void test_reference_tracking(void)
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
goto cleanup;
- toggle_object_autoload_progs(obj, name);
+ /* none of the programs are loaded by default, so just set
+ * autoload to true for the single prog under test
+ */
+ p = bpf_object__find_program_by_name(obj, name);
+ bpf_program__set_autoload(p, true);
+
/* Expect verifier failure if test name has 'err' */
if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
libbpf_print_fn_t old_print_fn;
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
new file mode 100644
index 000000000000..d7f83c0a40a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "skb_load_bytes.skel.h"
+
+void test_skb_load_bytes(void)
+{
+ struct skb_load_bytes *skel;
+ int err, prog_fd, test_result;
+ struct __sk_buff skb = { 0 };
+
+ LIBBPF_OPTS(bpf_test_run_opts, tattr,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ skel = skb_load_bytes__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.skb_process);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto out;
+
+ skel->bss->load_offset = (uint32_t)(-1);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, -EFAULT, "offset -1"))
+ goto out;
+
+ skel->bss->load_offset = (uint32_t)10;
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, 0, "offset 10"))
+ goto out;
+
+out:
+ skb_load_bytes__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 394ebfc3bbf3..4be6fdb78c6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -83,8 +83,6 @@ cleanup:
test_snprintf__destroy(skel);
}
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
/* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */
static int load_single_snprintf(char *fmt)
{
@@ -95,7 +93,7 @@ static int load_single_snprintf(char *fmt)
if (!skel)
return -EINVAL;
- memcpy(skel->rodata->fmt, fmt, min(strlen(fmt) + 1, 10));
+ memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10));
ret = test_snprintf_single__load(skel);
test_snprintf_single__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 7ad66a247c02..958dae769c52 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -949,7 +949,6 @@ fail:
return -1;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
enum {
SRC_TO_TARGET = 0,
TARGET_TO_SRC = 1,
diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
index 509e21d5cb9d..b90ee47d3111 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
@@ -81,6 +81,7 @@ void test_test_global_funcs(void)
{ "test_global_func14.o", "reference type('FWD S') size cannot be determined" },
{ "test_global_func15.o", "At program exit the register R0 has value" },
{ "test_global_func16.o", "invalid indirect read from stack" },
+ { "test_global_func17.o", "Caller passes invalid args into func#1" },
};
libbpf_print_fn_t old_print_fn = NULL;
int err, i, duration = 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
index b57a3009465f..7ddd6615b7e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
@@ -44,16 +44,12 @@ static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name,
static void test_strncmp_ret(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err, got;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
bpf_program__set_autoload(skel->progs.do_strncmp, true);
err = strncmp_test__load(skel);
@@ -91,18 +87,13 @@ out:
static void test_strncmp_bad_not_const_str_size(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size");
@@ -113,18 +104,13 @@ static void test_strncmp_bad_not_const_str_size(void)
static void test_strncmp_bad_writable_target(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_writable_target");
@@ -135,18 +121,13 @@ static void test_strncmp_bad_writable_target(void)
static void test_strncmp_bad_not_null_term_target(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target");
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
new file mode 100644
index 000000000000..35b87c7ba5be
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include "test_uprobe_autoattach.skel.h"
+
+/* uprobe attach point */
+static noinline int autoattach_trigger_func(int arg)
+{
+ asm volatile ("");
+ return arg + 1;
+}
+
+void test_uprobe_autoattach(void)
+{
+ struct test_uprobe_autoattach *skel;
+ int trigger_val = 100, trigger_ret;
+ size_t malloc_sz = 1;
+ char *mem;
+
+ skel = test_uprobe_autoattach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ if (!ASSERT_OK(test_uprobe_autoattach__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ skel->bss->test_pid = getpid();
+
+ /* trigger & validate uprobe & uretprobe */
+ trigger_ret = autoattach_trigger_func(trigger_val);
+
+ skel->bss->test_pid = getpid();
+
+ /* trigger & validate shared library u[ret]probes attached by name */
+ mem = malloc(malloc_sz);
+
+ ASSERT_EQ(skel->bss->uprobe_byname_parm1, trigger_val, "check_uprobe_byname_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran");
+ ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran");
+ ASSERT_EQ(skel->bss->uprobe_byname2_parm1, malloc_sz, "check_uprobe_byname2_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran");
+
+ free(mem);
+cleanup:
+ test_uprobe_autoattach__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c
new file mode 100644
index 000000000000..a71f51bdc08d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/usdt.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#define _SDT_HAS_SEMAPHORES 1
+#include "../sdt.h"
+
+#include "test_usdt.skel.h"
+#include "test_urandom_usdt.skel.h"
+
+int lets_test_this(int);
+
+static volatile int idx = 2;
+static volatile __u64 bla = 0xFEDCBA9876543210ULL;
+static volatile short nums[] = {-1, -2, -3, };
+
+static volatile struct {
+ int x;
+ signed char y;
+} t1 = { 1, -127 };
+
+#define SEC(name) __attribute__((section(name), used))
+
+unsigned short test_usdt0_semaphore SEC(".probes");
+unsigned short test_usdt3_semaphore SEC(".probes");
+unsigned short test_usdt12_semaphore SEC(".probes");
+
+static void __always_inline trigger_func(int x) {
+ long y = 42;
+
+ if (test_usdt0_semaphore)
+ STAP_PROBE(test, usdt0);
+ if (test_usdt3_semaphore)
+ STAP_PROBE3(test, usdt3, x, y, &bla);
+ if (test_usdt12_semaphore) {
+ STAP_PROBE12(test, usdt12,
+ x, x + 1, y, x + y, 5,
+ y / 7, bla, &bla, -9, nums[x],
+ nums[idx], t1.y);
+ }
+}
+
+static void subtest_basic_usdt(void)
+{
+ LIBBPF_OPTS(bpf_usdt_opts, opts);
+ struct test_usdt *skel;
+ struct test_usdt__bss *bss;
+ int err;
+
+ skel = test_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ err = test_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* usdt0 won't be auto-attached */
+ opts.usdt_cookie = 0xcafedeadbeeffeed;
+ skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0,
+ 0 /*self*/, "/proc/self/exe",
+ "test", "usdt0", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link"))
+ goto cleanup;
+
+ trigger_func(1);
+
+ ASSERT_EQ(bss->usdt0_called, 1, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, 1, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, 1, "usdt12_called");
+
+ ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie");
+ ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt");
+ ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret");
+
+ /* auto-attached usdt3 gets default zero cookie value */
+ ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie");
+ ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
+
+ ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
+ ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1");
+ ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
+ ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+
+ /* auto-attached usdt12 gets default zero cookie value */
+ ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie");
+ ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt");
+
+ ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1");
+ ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2");
+ ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3");
+ ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4");
+ ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5");
+ ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6");
+ ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7");
+ ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8");
+ ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9");
+ ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10");
+ ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11");
+ ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12");
+
+ /* trigger_func() is marked __always_inline, so USDT invocations will be
+ * inlined in two different places, meaning that each USDT will have
+ * at least 2 different places to be attached to. This verifies that
+ * bpf_program__attach_usdt() handles this properly and attaches to
+ * all possible places of USDT invocation.
+ */
+ trigger_func(2);
+
+ ASSERT_EQ(bss->usdt0_called, 2, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, 2, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, 2, "usdt12_called");
+
+ /* only check values that depend on trigger_func()'s input value */
+ ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1");
+
+ ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1");
+ ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2");
+ ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4");
+ ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10");
+
+ /* detach and re-attach usdt3 */
+ bpf_link__destroy(skel->links.usdt3);
+
+ opts.usdt_cookie = 0xBADC00C51E;
+ skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */,
+ "/proc/self/exe", "test", "usdt3", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach"))
+ goto cleanup;
+
+ trigger_func(3);
+
+ ASSERT_EQ(bss->usdt3_called, 3, "usdt3_called");
+ /* this time usdt3 has custom cookie */
+ ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie");
+ ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
+
+ ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
+ ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1");
+ ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
+ ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+
+cleanup:
+ test_usdt__destroy(skel);
+}
+
+unsigned short test_usdt_100_semaphore SEC(".probes");
+unsigned short test_usdt_300_semaphore SEC(".probes");
+unsigned short test_usdt_400_semaphore SEC(".probes");
+
+#define R10(F, X) F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \
+ F(X+5); F(X+6); F(X+7); F(X+8); F(X+9);
+#define R100(F, X) R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \
+ R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90);
+
+/* carefully control that we get exactly 100 inlines by preventing inlining */
+static void __always_inline f100(int x)
+{
+ STAP_PROBE1(test, usdt_100, x);
+}
+
+__weak void trigger_100_usdts(void)
+{
+ R100(f100, 0);
+}
+
+/* we shouldn't be able to attach to test:usdt_300 USDT as we don't have as
+ * many slots for specs. It's important that each STAP_PROBE1() invocation
+ * (after unrolling) gets a different arg spec due to the compiler inlining x
+ * as a constant
+ */
+static void __always_inline f300(int x)
+{
+ STAP_PROBE1(test, usdt_300, x);
+}
+
+__weak void trigger_300_usdts(void)
+{
+ R100(f300, 0);
+ R100(f300, 100);
+ R100(f300, 200);
+}
+
+static void __always_inline f400(int x __attribute__((unused)))
+{
+ static int y;
+
+ STAP_PROBE1(test, usdt_400, y++);
+}
+
+/* this time we have 400 different USDT call sites, but they have uniform
+ * argument location, so libbpf's spec string deduplication logic should keep
+ * the number of used specs very small and so we should be able to attach to
+ * all 400 call sites
+ */
+__weak void trigger_400_usdts(void)
+{
+ R100(f400, 0);
+ R100(f400, 100);
+ R100(f400, 200);
+ R100(f400, 300);
+}
+
+static void subtest_multispec_usdt(void)
+{
+ LIBBPF_OPTS(bpf_usdt_opts, opts);
+ struct test_usdt *skel;
+ struct test_usdt__bss *bss;
+ int err, i;
+
+ skel = test_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ err = test_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* usdt_100 is auto-attached and there are 100 inlined call sites,
+ * let's validate that all of them are properly attached to and
+ * handled from BPF side
+ */
+ trigger_100_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called");
+ ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+
+ /* Stress test free spec ID tracking. By default libbpf allows up to
+ * 256 specs to be used, so if we don't return free spec IDs back
+ * after a few detachments and re-attachments we should run out of
+ * available spec IDs.
+ */
+ for (i = 0; i < 2; i++) {
+ bpf_link__destroy(skel->links.usdt_100);
+
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
+ "/proc/self/exe",
+ "test", "usdt_100", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach"))
+ goto cleanup;
+
+ bss->usdt_100_sum = 0;
+ trigger_100_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called");
+ ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+ }
+
+ /* Now let's step it up and try to attach USDT that requires more than
+ * 256 attach points with different specs for each.
+ * Note that we need trigger_300_usdts() only to actually have 300
+ * USDT call sites, we are not going to actually trace them.
+ */
+ trigger_300_usdts();
+
+ /* we'll reuse usdt_100 BPF program for usdt_300 test */
+ bpf_link__destroy(skel->links.usdt_100);
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe",
+ "test", "usdt_300", NULL);
+ err = -errno;
+ if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach"))
+ goto cleanup;
+ ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err");
+
+ /* let's check that there are no "dangling" BPF programs attached due
+ * to partial success of the above test:usdt_300 attachment
+ */
+ bss->usdt_100_called = 0;
+ bss->usdt_100_sum = 0;
+
+ f300(777); /* this is 301st instance of usdt_300 */
+
+ ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called");
+ ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum");
+
+ /* This time we have USDT with 400 inlined invocations, but arg specs
+ * should be the same across all sites, so libbpf will only need to
+ * use one spec and thus we'll be able to attach 400 uprobes
+ * successfully.
+ *
+ * Again, we are reusing usdt_100 BPF program.
+ */
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
+ "/proc/self/exe",
+ "test", "usdt_400", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach"))
+ goto cleanup;
+
+ trigger_400_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called");
+ ASSERT_EQ(bss->usdt_100_sum, 399 * 400 / 2, "usdt_400_sum");
+
+cleanup:
+ test_usdt__destroy(skel);
+}
+
+static FILE *urand_spawn(int *pid)
+{
+ FILE *f;
+
+ /* urandom_read's stdout is wired into f */
+ f = popen("./urandom_read 1 report-pid", "r");
+ if (!f)
+ return NULL;
+
+ if (fscanf(f, "%d", pid) != 1) {
+ pclose(f);
+ return NULL;
+ }
+
+ return f;
+}
+
+static int urand_trigger(FILE **urand_pipe)
+{
+ int exit_code;
+
+ /* pclose() waits for the child process to exit and returns its exit code */
+ exit_code = pclose(*urand_pipe);
+ *urand_pipe = NULL;
+
+ return exit_code;
+}
+
+static void subtest_urandom_usdt(bool auto_attach)
+{
+ struct test_urandom_usdt *skel;
+ struct test_urandom_usdt__bss *bss;
+ struct bpf_link *l;
+ FILE *urand_pipe = NULL;
+ int err, urand_pid = 0;
+
+ skel = test_urandom_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ urand_pipe = urand_spawn(&urand_pid);
+ if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn"))
+ goto cleanup;
+
+ bss = skel->bss;
+ bss->urand_pid = urand_pid;
+
+ if (auto_attach) {
+ err = test_urandom_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_auto_attach"))
+ goto cleanup;
+ } else {
+ l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema,
+ urand_pid, "./urandom_read",
+ "urand", "read_without_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urand_without_sema_attach"))
+ goto cleanup;
+ skel->links.urand_read_without_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema,
+ urand_pid, "./urandom_read",
+ "urand", "read_with_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urand_with_sema_attach"))
+ goto cleanup;
+ skel->links.urand_read_with_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema,
+ urand_pid, "./liburandom_read.so",
+ "urandlib", "read_without_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach"))
+ goto cleanup;
+ skel->links.urandlib_read_without_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema,
+ urand_pid, "./liburandom_read.so",
+ "urandlib", "read_with_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach"))
+ goto cleanup;
+ skel->links.urandlib_read_with_sema = l;
+
+ }
+
+ /* trigger urandom_read USDTs */
+ ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code");
+
+ ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt");
+ ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum");
+
+ ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt");
+ ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum");
+
+ ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt");
+ ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum");
+
+ ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt");
+ ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum");
+
+cleanup:
+ if (urand_pipe)
+ pclose(urand_pipe);
+ test_urandom_usdt__destroy(skel);
+}
+
+void test_usdt(void)
+{
+ if (test__start_subtest("basic"))
+ subtest_basic_usdt();
+ if (test__start_subtest("multispec"))
+ subtest_multispec_usdt();
+ if (test__start_subtest("urand_auto_attach"))
+ subtest_urandom_usdt(true /* auto_attach */);
+ if (test__start_subtest("urand_pid_attach"))
+ subtest_urandom_usdt(false /* auto_attach */);
+}
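
As background for the attach calls above, a minimal two-part sketch (hypothetical provider/probe "myapp:request" and binary path, not part of this patch) of how a USDT is emitted by the traced application and handled on the BPF side:

	/* application side (compiled into ./myapp) */
	#include <sys/sdt.h>

	void handle_request(int id, long latency_ns)
	{
		STAP_PROBE2(myapp, request, id, latency_ns);
	}

	/* BPF side: auto-attaches via the SEC() path, or can be attached at
	 * runtime with bpf_program__attach_usdt()
	 */
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/usdt.bpf.h>

	SEC("usdt/./myapp:myapp:request")
	int BPF_USDT(on_request, int id, long latency_ns)
	{
		bpf_printk("request %d took %ld ns", id, latency_ns);
		return 0;
	}

	char _license[] SEC("license") = "GPL";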
diff --git a/tools/testing/selftests/bpf/progs/exhandler_kern.c b/tools/testing/selftests/bpf/progs/exhandler_kern.c
index f5ca142abf8f..dd9b30a0f0fc 100644
--- a/tools/testing/selftests/bpf/progs/exhandler_kern.c
+++ b/tools/testing/selftests/bpf/progs/exhandler_kern.c
@@ -7,6 +7,8 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
+
char _license[] SEC("license") = "GPL";
unsigned int exception_triggered;
@@ -37,7 +39,16 @@ int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
*/
work = task->task_works;
func = work->func;
- if (!work && !func)
- exception_triggered++;
+ /* Currently the verifier will fail for a `btf_ptr |= btf_ptr` instruction.
+ * To work around the issue, use barrier_var() and rewrite as below to
+ * prevent the compiler from generating verifier-unfriendly code.
+ */
+ barrier_var(work);
+ if (work)
+ return 0;
+ barrier_var(func);
+ if (func)
+ return 0;
+ exception_triggered++;
return 0;
}
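
A minimal standalone sketch of the barrier_var() pattern introduced above: the empty asm ties the value to a register so the compiler cannot merge the two NULL checks into a single OR of BTF pointers, which the verifier currently rejects.

	#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))

	static inline int both_null(void *a, void *b)
	{
		barrier_var(a);
		if (a)
			return 0;
		barrier_var(b);
		if (b)
			return 0;
		return 1;	/* reached only when both pointers are NULL */
	}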
diff --git a/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
new file mode 100644
index 000000000000..8e545865ea33
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map SEC(".maps");
+
+static __u64
+check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ void *data)
+{
+ bpf_get_current_comm(key, sizeof(*key));
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int test_map_key_write(const void *ctx)
+{
+ bpf_for_each_map_elem(&array_map, check_array_elem, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
index b964ec1390c2..b05571bc67d5 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs1.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c
@@ -4,6 +4,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
/* weak and shared between two files */
const volatile int my_tid __weak;
@@ -44,6 +45,13 @@ void set_output_ctx1(__u64 *ctx)
/* this weak instance should win because it's the first one */
__weak int set_output_weak(int x)
{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function, this used to
+ * cause problems for BPF static linker
+ */
+ whatever = bpf_core_type_size(struct task_struct);
+
output_weak1 = x;
return x;
}
@@ -53,12 +61,17 @@ extern int set_output_val2(int x);
/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
__hidden extern void set_output_ctx2(__u64 *ctx);
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int BPF_PROG(handler1, struct pt_regs *regs, long id)
{
+ static volatile int whatever;
+
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+
set_output_val2(1000);
set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
index 575e958e60b7..ee7e3848ee4f 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs2.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c
@@ -4,6 +4,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
/* weak and shared between both files */
const volatile int my_tid __weak;
@@ -44,6 +45,13 @@ void set_output_ctx2(__u64 *ctx)
/* this weak instance should lose, because it will be processed second */
__weak int set_output_weak(int x)
{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function, this used to
+ * cause problems for BPF static linker
+ */
+ whatever = 2 * bpf_core_type_size(struct task_struct);
+
output_weak2 = x;
return 2 * x;
}
@@ -53,12 +61,17 @@ extern int set_output_val1(int x);
/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
__hidden extern void set_output_ctx1(__u64 *ctx);
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int BPF_PROG(handler2, struct pt_regs *regs, long id)
{
+ static volatile int whatever;
+
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+
set_output_val1(2000);
set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
new file mode 100644
index 000000000000..1b0e0409eaa5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_value {
+ struct prog_test_ref_kfunc __kptr *unref_ptr;
+ struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct hash_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} hash_map SEC(".maps");
+
+struct hash_malloc_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_malloc_map SEC(".maps");
+
+struct lru_hash_map {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} lru_hash_map SEC(".maps");
+
+#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \
+ struct { \
+ __uint(type, map_type); \
+ __uint(max_entries, 1); \
+ __uint(key_size, sizeof(int)); \
+ __uint(value_size, sizeof(int)); \
+ __array(values, struct inner_map_type); \
+ } name SEC(".maps") = { \
+ .values = { [0] = &inner_map_type }, \
+ }
+
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
+
+extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern struct prog_test_ref_kfunc *
+bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
+extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+
+static void test_kptr_unref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->unref_ptr;
+ /* store untrusted_ptr_or_null_ */
+ v->unref_ptr = p;
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store untrusted_ptr_ */
+ v->unref_ptr = p;
+ /* store NULL */
+ v->unref_ptr = NULL;
+}
+
+static void test_kptr_ref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->ref_ptr;
+ /* store ptr_or_null_ */
+ v->unref_ptr = p;
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store NULL */
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ /* store ptr_ */
+ v->unref_ptr = p;
+ bpf_kfunc_call_test_release(p);
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return;
+ /* store ptr_ */
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr_get(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr(struct map_value *v)
+{
+ test_kptr_unref(v);
+ test_kptr_ref(v);
+ test_kptr_get(v);
+}
+
+SEC("tc")
+int test_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int i, key = 0;
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+
+#undef TEST
+ return 0;
+}
+
+SEC("tc")
+int test_map_in_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int i, key = 0;
+ void *map;
+
+#define TEST(map_in_map) \
+ map = bpf_map_lookup_elem(&map_in_map, &key); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_of_array_maps);
+ TEST(array_of_hash_maps);
+ TEST(array_of_hash_malloc_maps);
+ TEST(array_of_lru_hash_maps);
+ TEST(hash_of_array_maps);
+ TEST(hash_of_hash_maps);
+ TEST(hash_of_hash_malloc_maps);
+ TEST(hash_of_lru_hash_maps);
+
+#undef TEST
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
index b3fcb5274ee0..f793280a3238 100644
--- a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
@@ -35,10 +35,10 @@ int oncpu(void *ctx)
long val;
val = bpf_get_stackid(ctx, &stackmap, 0);
- if (val > 0)
+ if (val >= 0)
stackid_kernel = 2;
val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
- if (val > 0)
+ if (val >= 0)
stackid_user = 2;
trace = bpf_map_lookup_elem(&stackdata_map, &key);
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 4896fdf816f7..92331053dba3 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -826,8 +826,9 @@ out:
SEC("kprobe/vfs_link")
int BPF_KPROBE(kprobe__vfs_link,
- struct dentry* old_dentry, struct inode* dir,
- struct dentry* new_dentry, struct inode** delegated_inode)
+ struct dentry* old_dentry, struct user_namespace *mnt_userns,
+ struct inode* dir, struct dentry* new_dentry,
+ struct inode** delegated_inode)
{
struct bpf_func_stats_ctx stats_ctx;
bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_link);
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 1ed28882daf3..5d3dc4d66d47 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -299,7 +299,11 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
#ifdef NO_UNROLL
#pragma clang loop unroll(disable)
#else
+#ifdef UNROLL_COUNT
+#pragma clang loop unroll_count(UNROLL_COUNT)
+#else
#pragma clang loop unroll(full)
+#endif
#endif /* NO_UNROLL */
/* Unwind python stack */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
diff --git a/tools/testing/selftests/bpf/progs/pyperf600.c b/tools/testing/selftests/bpf/progs/pyperf600.c
index cb49b89e37cd..ce1aa5189cc4 100644
--- a/tools/testing/selftests/bpf/progs/pyperf600.c
+++ b/tools/testing/selftests/bpf/progs/pyperf600.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
-/* clang will not unroll the loop 600 times.
- * Instead it will unroll it to the amount it deemed
- * appropriate, but the loop will still execute 600 times.
- * Total program size is around 90k insns
+/* Full unroll of 600 iterations will have total
+ * program size close to 298k insns and this may
+ * cause BPF_JMP insn offsets to fall out of the 16-bit integer range.
+ * So limit the unroll size to 150 so the
+ * total program size is around 80k insns but
+ * the loop will still execute 600 times.
*/
+#define UNROLL_COUNT 150
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/skb_load_bytes.c b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
new file mode 100644
index 000000000000..e4252fd973be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 load_offset = 0;
+int test_result = 0;
+
+SEC("tc")
+int skb_process(struct __sk_buff *skb)
+{
+ char buf[16];
+
+ test_result = bpf_skb_load_bytes(skb, load_offset, buf, 10);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/strncmp_test.c b/tools/testing/selftests/bpf/progs/strncmp_test.c
index 900d930d48a8..769668feed48 100644
--- a/tools/testing/selftests/bpf/progs/strncmp_test.c
+++ b/tools/testing/selftests/bpf/progs/strncmp_test.c
@@ -19,7 +19,7 @@ unsigned int no_const_str_size = STRNCMP_STR_SZ;
char _license[] SEC("license") = "GPL";
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int do_strncmp(void *ctx)
{
if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
@@ -29,7 +29,7 @@ int do_strncmp(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_const_str_size(void *ctx)
{
/* The value of string size is not const, so will fail */
@@ -37,7 +37,7 @@ int strncmp_bad_not_const_str_size(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_writable_target(void *ctx)
{
/* Compared target is not read-only, so will fail */
@@ -45,7 +45,7 @@ int strncmp_bad_writable_target(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_null_term_target(void *ctx)
{
/* Compared target is not null-terminated, so will fail */
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 8056a4c6d918..af994d16bb10 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -10,6 +10,10 @@ int kprobe_res = 0;
int kretprobe_res = 0;
int uprobe_res = 0;
int uretprobe_res = 0;
+int uprobe_byname_res = 0;
+int uretprobe_byname_res = 0;
+int uprobe_byname2_res = 0;
+int uretprobe_byname2_res = 0;
SEC("kprobe/sys_nanosleep")
int handle_kprobe(struct pt_regs *ctx)
@@ -25,18 +29,51 @@ int BPF_KRETPROBE(handle_kretprobe)
return 0;
}
-SEC("uprobe/trigger_func")
+SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
uprobe_res = 3;
return 0;
}
-SEC("uretprobe/trigger_func")
+SEC("uretprobe")
int handle_uretprobe(struct pt_regs *ctx)
{
uretprobe_res = 4;
return 0;
}
+SEC("uprobe")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_res = 5;
+ return 0;
+}
+
+/* use auto-attach format for section definition. */
+SEC("uretprobe//proc/self/exe:trigger_func2")
+int handle_uretprobe_byname(struct pt_regs *ctx)
+{
+ uretprobe_byname_res = 6;
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe_byname2(struct pt_regs *ctx)
+{
+ unsigned int size = PT_REGS_PARM1(ctx);
+
+ /* verify malloc size */
+ if (size == 1)
+ uprobe_byname2_res = 7;
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe_byname2(struct pt_regs *ctx)
+{
+ uretprobe_byname2_res = 8;
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
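
Since the SEC("uprobe") programs above no longer name their target, the test harness attaches them explicitly; a minimal userspace sketch, assuming the bpf_uprobe_opts.func_name support added alongside this series (skeleton and program names taken from the file above):

	#include <errno.h>
	#include <bpf/libbpf.h>
	#include "test_attach_probe.skel.h"

	static int attach_uprobe_by_name(struct test_attach_probe *skel)
	{
		LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "trigger_func");

		skel->links.handle_uprobe =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
							0 /* self pid */,
							"/proc/self/exe",
							0 /* offset resolved from func_name */,
							&opts);
		return skel->links.handle_uprobe ? 0 : -errno;
	}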
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
index 2d3a7710e2ce..0e2222968918 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
@@ -37,14 +37,14 @@ int handle_kretprobe(struct pt_regs *ctx)
return 0;
}
-SEC("uprobe/trigger_func")
+SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
update(ctx, &uprobe_res);
return 0;
}
-SEC("uretprobe/trigger_func")
+SEC("uretprobe")
int handle_uretprobe(struct pt_regs *ctx)
{
update(ctx, &uretprobe_res);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c
new file mode 100644
index 000000000000..2b8b9b8ba018
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func17.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline int foo(int *p)
+{
+ return p ? (*p = 42) : 0;
+}
+
+const volatile int i;
+
+SEC("tc")
+int test_cls(struct __sk_buff *skb)
+{
+ return foo((int *)&i);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
index 68d64c365f90..20ef9d433b97 100644
--- a/tools/testing/selftests/bpf/progs/test_helper_restricted.c
+++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
@@ -56,7 +56,7 @@ static void spin_lock_work(void)
}
}
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int raw_tp_timer(void *ctx)
{
timer_work();
@@ -64,7 +64,7 @@ int raw_tp_timer(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_timer(void *ctx)
{
timer_work();
@@ -72,7 +72,7 @@ int tp_timer(void *ctx)
return 0;
}
-SEC("kprobe/sys_nanosleep")
+SEC("?kprobe/sys_nanosleep")
int kprobe_timer(void *ctx)
{
timer_work();
@@ -80,7 +80,7 @@ int kprobe_timer(void *ctx)
return 0;
}
-SEC("perf_event")
+SEC("?perf_event")
int perf_event_timer(void *ctx)
{
timer_work();
@@ -88,7 +88,7 @@ int perf_event_timer(void *ctx)
return 0;
}
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int raw_tp_spin_lock(void *ctx)
{
spin_lock_work();
@@ -96,7 +96,7 @@ int raw_tp_spin_lock(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_spin_lock(void *ctx)
{
spin_lock_work();
@@ -104,7 +104,7 @@ int tp_spin_lock(void *ctx)
return 0;
}
-SEC("kprobe/sys_nanosleep")
+SEC("?kprobe/sys_nanosleep")
int kprobe_spin_lock(void *ctx)
{
spin_lock_work();
@@ -112,7 +112,7 @@ int kprobe_spin_lock(void *ctx)
return 0;
}
-SEC("perf_event")
+SEC("?perf_event")
int perf_event_spin_lock(void *ctx)
{
spin_lock_work();
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
index 2180c41cd890..a72a5bf3812a 100644
--- a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
@@ -8,7 +8,7 @@
extern const int bpf_prog_active __ksym; /* int type global var. */
SEC("raw_tp/sys_enter")
-int handler(const void *ctx)
+int handler1(const void *ctx)
{
int *active;
__u32 cpu;
@@ -26,4 +26,20 @@ int handler(const void *ctx)
return 0;
}
+__noinline int write_active(int *p)
+{
+ return p ? (*p = 42) : 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handler2(const void *ctx)
+{
+ int *active;
+ __u32 cpu;
+
+ active = bpf_this_cpu_ptr(&bpf_prog_active);
+ write_active(active);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index 19e4d2071c60..c8bc0c6947aa 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -218,7 +218,7 @@ static __noinline bool get_packet_dst(struct real_definition **real,
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
- return 0;
+ return false;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c
new file mode 100644
index 000000000000..a78980d897b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct task_struct___bad {
+ int pid;
+ int fake_field;
+ void *fake_field_subprog;
+} __attribute__((preserve_access_index));
+
+SEC("?raw_tp/sys_enter")
+int bad_relo(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bpf_core_field_size(t->fake_field);
+}
+
+static __noinline int bad_subprog(void)
+{
+ static struct task_struct___bad *t;
+
+ /* ugliness below is a field offset relocation */
+ return (void *)&t->fake_field_subprog - (void *)t;
+}
+
+SEC("?raw_tp/sys_enter")
+int bad_relo_subprog(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bad_subprog() + bpf_core_field_size(t->pid);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 02f79356d5eb..98c6493d9b91 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -89,7 +89,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -121,7 +120,6 @@ assign:
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -161,7 +159,7 @@ assign:
SEC("tc")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
- struct bpf_sock_tuple *tuple, ln = {0};
+ struct bpf_sock_tuple *tuple;
bool ipv4 = false;
bool tcp = false;
int tuple_len;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index 40f161480a2f..b502e5c92e33 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -52,7 +52,7 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
-SEC("tc")
+SEC("?tc")
int sk_lookup_success(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
@@ -78,7 +78,7 @@ int sk_lookup_success(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
-SEC("tc")
+SEC("?tc")
int sk_lookup_success_simple(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -90,7 +90,7 @@ int sk_lookup_success_simple(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_use_after_free(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -105,7 +105,7 @@ int err_use_after_free(struct __sk_buff *skb)
return family;
}
-SEC("tc")
+SEC("?tc")
int err_modify_sk_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -120,7 +120,7 @@ int err_modify_sk_pointer(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -134,7 +134,7 @@ int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -143,7 +143,7 @@ int err_no_release(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_release_twice(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -155,7 +155,7 @@ int err_release_twice(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_release_unchecked(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -172,7 +172,7 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
-SEC("tc")
+SEC("?tc")
int err_no_release_subcall(struct __sk_buff *skb)
{
lookup_no_release(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
index e6cb09259408..1926facba122 100644
--- a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
+++ b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
@@ -14,7 +14,7 @@ char current_regs[PT_REGS_SIZE] = {};
char ctx_regs[PT_REGS_SIZE] = {};
int uprobe_res = 0;
-SEC("uprobe/trigger_func")
+SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
struct task_struct *current;
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
new file mode 100644
index 000000000000..ab75522e2eeb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int uprobe_byname_parm1 = 0;
+int uprobe_byname_ran = 0;
+int uretprobe_byname_rc = 0;
+int uretprobe_byname_ran = 0;
+size_t uprobe_byname2_parm1 = 0;
+int uprobe_byname2_ran = 0;
+char *uretprobe_byname2_rc = NULL;
+int uretprobe_byname2_ran = 0;
+
+int test_pid;
+
+/* This program cannot auto-attach, but that should not stop other
+ * programs from attaching.
+ */
+SEC("uprobe")
+int handle_uprobe_noautoattach(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uprobe//proc/self/exe:autoattach_trigger_func")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_parm1 = PT_REGS_PARM1_CORE(ctx);
+ uprobe_byname_ran = 1;
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:autoattach_trigger_func")
+int handle_uretprobe_byname(struct pt_regs *ctx)
+{
+ uretprobe_byname_rc = PT_REGS_RC_CORE(ctx);
+ uretprobe_byname_ran = 2;
+ return 0;
+}
+
+
+SEC("uprobe/libc.so.6:malloc")
+int handle_uprobe_byname2(struct pt_regs *ctx)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ /* ignore irrelevant invocations */
+ if (test_pid != pid)
+ return 0;
+ uprobe_byname2_parm1 = PT_REGS_PARM1_CORE(ctx);
+ uprobe_byname2_ran = 3;
+ return 0;
+}
+
+SEC("uretprobe/libc.so.6:malloc")
+int handle_uretprobe_byname2(struct pt_regs *ctx)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ /* ignore irrelevant invocations */
+ if (test_pid != pid)
+ return 0;
+ uretprobe_byname2_rc = (char *)PT_REGS_RC_CORE(ctx);
+ uretprobe_byname2_ran = 4;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_urandom_usdt.c b/tools/testing/selftests/bpf/progs/test_urandom_usdt.c
new file mode 100644
index 000000000000..3539b02bd5f7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_urandom_usdt.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+int urand_pid;
+
+int urand_read_without_sema_call_cnt;
+int urand_read_without_sema_buf_sz_sum;
+
+SEC("usdt/./urandom_read:urand:read_without_sema")
+int BPF_USDT(urand_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urand_read_without_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urand_read_without_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urand_read_with_sema_call_cnt;
+int urand_read_with_sema_buf_sz_sum;
+
+SEC("usdt/./urandom_read:urand:read_with_sema")
+int BPF_USDT(urand_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urand_read_with_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urand_read_with_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urandlib_read_without_sema_call_cnt;
+int urandlib_read_without_sema_buf_sz_sum;
+
+SEC("usdt/./liburandom_read.so:urandlib:read_without_sema")
+int BPF_USDT(urandlib_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urandlib_read_without_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urandlib_read_without_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urandlib_read_with_sema_call_cnt;
+int urandlib_read_with_sema_buf_sz_sum;
+
+SEC("usdt/./liburandom_read.so:urandlib:read_with_sema")
+int BPF_USDT(urandlib_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urandlib_read_with_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urandlib_read_with_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
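The read_with_sema variants above rely on USDT semaphores, so the traced process only pays for argument setup while a program is attached. A minimal sketch of the matching user-space probe site, assuming the sdt.h added later in this series and the <provider>_<name>_semaphore naming convention; the section name and the surrounding function are illustrative:

/* Illustrative sketch, not part of the patch. */
#define _SDT_HAS_SEMAPHORES 1
#include "sdt.h"

/* bumped by the USDT attacher (e.g. libbpf) while a program is attached */
unsigned short urand_read_with_sema_semaphore
        __attribute__((unused, section(".probes")));

static void report_read(int iter_num, int iter_cnt, int buf_sz)
{
        /* skip argument evaluation entirely when nothing is attached */
        if (urand_read_with_sema_semaphore)
                STAP_PROBE3(urand, read_with_sema, iter_num, iter_cnt, buf_sz);
}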
diff --git a/tools/testing/selftests/bpf/progs/test_usdt.c b/tools/testing/selftests/bpf/progs/test_usdt.c
new file mode 100644
index 000000000000..505aab9a5234
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_usdt.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+int my_pid;
+
+int usdt0_called;
+u64 usdt0_cookie;
+int usdt0_arg_cnt;
+int usdt0_arg_ret;
+
+SEC("usdt")
+int usdt0(struct pt_regs *ctx)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt0_called, 1);
+
+ usdt0_cookie = bpf_usdt_cookie(ctx);
+ usdt0_arg_cnt = bpf_usdt_arg_cnt(ctx);
+ /* should return -ENOENT for any arg_num */
+ usdt0_arg_ret = bpf_usdt_arg(ctx, bpf_get_prandom_u32(), &tmp);
+ return 0;
+}
+
+int usdt3_called;
+u64 usdt3_cookie;
+int usdt3_arg_cnt;
+int usdt3_arg_rets[3];
+u64 usdt3_args[3];
+
+SEC("usdt//proc/self/exe:test:usdt3")
+int usdt3(struct pt_regs *ctx)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt3_called, 1);
+
+ usdt3_cookie = bpf_usdt_cookie(ctx);
+ usdt3_arg_cnt = bpf_usdt_arg_cnt(ctx);
+
+ usdt3_arg_rets[0] = bpf_usdt_arg(ctx, 0, &tmp);
+ usdt3_args[0] = (int)tmp;
+
+ usdt3_arg_rets[1] = bpf_usdt_arg(ctx, 1, &tmp);
+ usdt3_args[1] = (long)tmp;
+
+ usdt3_arg_rets[2] = bpf_usdt_arg(ctx, 2, &tmp);
+ usdt3_args[2] = (uintptr_t)tmp;
+
+ return 0;
+}
+
+int usdt12_called;
+u64 usdt12_cookie;
+int usdt12_arg_cnt;
+u64 usdt12_args[12];
+
+SEC("usdt//proc/self/exe:test:usdt12")
+int BPF_USDT(usdt12, int a1, int a2, long a3, long a4, unsigned a5,
+ long a6, __u64 a7, uintptr_t a8, int a9, short a10,
+ short a11, signed char a12)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt12_called, 1);
+
+ usdt12_cookie = bpf_usdt_cookie(ctx);
+ usdt12_arg_cnt = bpf_usdt_arg_cnt(ctx);
+
+ usdt12_args[0] = a1;
+ usdt12_args[1] = a2;
+ usdt12_args[2] = a3;
+ usdt12_args[3] = a4;
+ usdt12_args[4] = a5;
+ usdt12_args[5] = a6;
+ usdt12_args[6] = a7;
+ usdt12_args[7] = a8;
+ usdt12_args[8] = a9;
+ usdt12_args[9] = a10;
+ usdt12_args[10] = a11;
+ usdt12_args[11] = a12;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
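For context, the user-space side these programs expect looks roughly like the sketch below; the provider/name pairs mirror the SEC() strings above, STAP_PROBE*() comes from the sdt.h added later in this series, and the argument values are illustrative:

/* Illustrative sketch, not part of the patch. */
#include "sdt.h"

static void trigger_usdt(void)
{
        int x = 1;
        long y = -42;

        /* matched by SEC("usdt//proc/self/exe:test:usdt3"); the BPF side
         * reads the operands back with bpf_usdt_arg_cnt()/bpf_usdt_arg()
         */
        STAP_PROBE3(test, usdt3, x, y, &y);

        /* the same probe name may appear at many call sites; a cookie
         * supplied at attach time is read back with bpf_usdt_cookie()
         */
        STAP_PROBE1(test, usdt_100, x);
}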
diff --git a/tools/testing/selftests/bpf/progs/test_usdt_multispec.c b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c
new file mode 100644
index 000000000000..aa6de32b50d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+/* this file is linked together with test_usdt.c to validate that usdt.bpf.h
+ * can be included in multiple .bpf.c files forming a single final BPF object
+ * file
+ */
+
+extern int my_pid;
+
+int usdt_100_called;
+int usdt_100_sum;
+
+SEC("usdt//proc/self/exe:test:usdt_100")
+int BPF_USDT(usdt_100, int x)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt_100_called, 1);
+ __sync_fetch_and_add(&usdt_100_sum, x);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 596c4e71bf3a..125d872d7981 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -564,22 +564,22 @@ static bool get_packet_dst(struct real_definition **real,
hash = get_packet_hash(pckt, hash_16bytes);
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
- return 0;
+ return false;
key = 2 * vip_info->vip_num + hash % 2;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
- return 0;
+ return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
- return 0;
+ return false;
if (!(vip_info->flags & (1 << 1))) {
__u32 conn_rate_key = 512 + 2;
struct lb_stats *conn_rate_stats =
bpf_map_lookup_elem(&stats, &conn_rate_key);
if (!conn_rate_stats)
- return 1;
+ return true;
cur_time = bpf_ktime_get_ns();
if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
conn_rate_stats->v1 = 1;
@@ -587,14 +587,14 @@ static bool get_packet_dst(struct real_definition **real,
} else {
conn_rate_stats->v1 += 1;
if (conn_rate_stats->v1 >= 1)
- return 1;
+ return true;
}
if (pckt->flow.proto == IPPROTO_UDP)
new_dst_lru.atime = cur_time;
new_dst_lru.pos = key;
bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
}
- return 1;
+ return true;
}
__attribute__ ((noinline))
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 2ab049b54d6c..694e7cec1823 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -54,7 +54,7 @@ int bench_trigger_fmodret(void *ctx)
return -22;
}
-SEC("uprobe/self/uprobe_target")
+SEC("uprobe")
int bench_trigger_uprobe(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
diff --git a/tools/testing/selftests/bpf/sdt-config.h b/tools/testing/selftests/bpf/sdt-config.h
new file mode 100644
index 000000000000..733045a52771
--- /dev/null
+++ b/tools/testing/selftests/bpf/sdt-config.h
@@ -0,0 +1,6 @@
+/* includes/sys/sdt-config.h. Generated from sdt-config.h.in by configure.
+
+ This file just defines _SDT_ASM_SECTION_AUTOGROUP_SUPPORT to 0 or 1 to
+ indicate whether the assembler supports "?" in .pushsection directives. */
+
+#define _SDT_ASM_SECTION_AUTOGROUP_SUPPORT 1
diff --git a/tools/testing/selftests/bpf/sdt.h b/tools/testing/selftests/bpf/sdt.h
new file mode 100644
index 000000000000..ca0162b4dc57
--- /dev/null
+++ b/tools/testing/selftests/bpf/sdt.h
@@ -0,0 +1,513 @@
+/* <sys/sdt.h> - Systemtap static probe definition macros.
+
+ This file is dedicated to the public domain, pursuant to CC0
+ (https://creativecommons.org/publicdomain/zero/1.0/)
+*/
+
+#ifndef _SYS_SDT_H
+#define _SYS_SDT_H 1
+
+/*
+ This file defines a family of macros
+
+ STAP_PROBEn(op1, ..., opn)
+
+ that emit a nop into the instruction stream, and some data into an auxiliary
+ note section. The data in the note section describes the operands, in terms
+   of size and location.  Each location is encoded as an assembler operand string.
+ Consumer tools such as gdb or systemtap insert breakpoints on top of
+ the nop, and decode the location operand-strings, like an assembler,
+ to find the values being passed.
+
+ The operand strings are selected by the compiler for each operand.
+ They are constrained by gcc inline-assembler codes. The default is:
+
+ #define STAP_SDT_ARG_CONSTRAINT nor
+
+ This is a good default if the operands tend to be integral and
+   moderate in number (smaller than the number of registers).  In other
+ cases, the compiler may report "'asm' requires impossible reload" or
+ similar. In this case, consider simplifying the macro call (fewer
+ and simpler operands), reduce optimization, or override the default
+ constraints string via:
+
+ #define STAP_SDT_ARG_CONSTRAINT g
+ #include <sys/sdt.h>
+
+ See also:
+ https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ https://gcc.gnu.org/onlinedocs/gcc/Constraints.html
+ */
+
+
+
+#ifdef __ASSEMBLER__
+# define _SDT_PROBE(provider, name, n, arglist) \
+ _SDT_ASM_BODY(provider, name, _SDT_ASM_SUBSTR_1, (_SDT_DEPAREN_##n arglist)) \
+ _SDT_ASM_BASE
+# define _SDT_ASM_1(x) x;
+# define _SDT_ASM_2(a, b) a,b;
+# define _SDT_ASM_3(a, b, c) a,b,c;
+# define _SDT_ASM_5(a, b, c, d, e) a,b,c,d,e;
+# define _SDT_ASM_STRING_1(x) .asciz #x;
+# define _SDT_ASM_SUBSTR_1(x) .ascii #x;
+# define _SDT_DEPAREN_0() /* empty */
+# define _SDT_DEPAREN_1(a) a
+# define _SDT_DEPAREN_2(a,b) a b
+# define _SDT_DEPAREN_3(a,b,c) a b c
+# define _SDT_DEPAREN_4(a,b,c,d) a b c d
+# define _SDT_DEPAREN_5(a,b,c,d,e) a b c d e
+# define _SDT_DEPAREN_6(a,b,c,d,e,f) a b c d e f
+# define _SDT_DEPAREN_7(a,b,c,d,e,f,g) a b c d e f g
+# define _SDT_DEPAREN_8(a,b,c,d,e,f,g,h) a b c d e f g h
+# define _SDT_DEPAREN_9(a,b,c,d,e,f,g,h,i) a b c d e f g h i
+# define _SDT_DEPAREN_10(a,b,c,d,e,f,g,h,i,j) a b c d e f g h i j
+# define _SDT_DEPAREN_11(a,b,c,d,e,f,g,h,i,j,k) a b c d e f g h i j k
+# define _SDT_DEPAREN_12(a,b,c,d,e,f,g,h,i,j,k,l) a b c d e f g h i j k l
+#else
+#if defined _SDT_HAS_SEMAPHORES
+#define _SDT_NOTE_SEMAPHORE_USE(provider, name) \
+ __asm__ __volatile__ ("" :: "m" (provider##_##name##_semaphore));
+#else
+#define _SDT_NOTE_SEMAPHORE_USE(provider, name)
+#endif
+
+# define _SDT_PROBE(provider, name, n, arglist) \
+ do { \
+ _SDT_NOTE_SEMAPHORE_USE(provider, name); \
+ __asm__ __volatile__ (_SDT_ASM_BODY(provider, name, _SDT_ASM_ARGS, (n)) \
+ :: _SDT_ASM_OPERANDS_##n arglist); \
+ __asm__ __volatile__ (_SDT_ASM_BASE); \
+ } while (0)
+# define _SDT_S(x) #x
+# define _SDT_ASM_1(x) _SDT_S(x) "\n"
+# define _SDT_ASM_2(a, b) _SDT_S(a) "," _SDT_S(b) "\n"
+# define _SDT_ASM_3(a, b, c) _SDT_S(a) "," _SDT_S(b) "," \
+ _SDT_S(c) "\n"
+# define _SDT_ASM_5(a, b, c, d, e) _SDT_S(a) "," _SDT_S(b) "," \
+ _SDT_S(c) "," _SDT_S(d) "," \
+ _SDT_S(e) "\n"
+# define _SDT_ASM_ARGS(n) _SDT_ASM_TEMPLATE_##n
+# define _SDT_ASM_STRING_1(x) _SDT_ASM_1(.asciz #x)
+# define _SDT_ASM_SUBSTR_1(x) _SDT_ASM_1(.ascii #x)
+
+# define _SDT_ARGFMT(no) _SDT_ASM_1(_SDT_SIGN %n[_SDT_S##no]) \
+ _SDT_ASM_1(_SDT_SIZE %n[_SDT_S##no]) \
+ _SDT_ASM_1(_SDT_TYPE %n[_SDT_S##no]) \
+ _SDT_ASM_SUBSTR(_SDT_ARGTMPL(_SDT_A##no))
+
+
+# ifndef STAP_SDT_ARG_CONSTRAINT
+# if defined __powerpc__
+# define STAP_SDT_ARG_CONSTRAINT nZr
+# elif defined __arm__
+# define STAP_SDT_ARG_CONSTRAINT g
+# else
+# define STAP_SDT_ARG_CONSTRAINT nor
+# endif
+# endif
+
+# define _SDT_STRINGIFY(x) #x
+# define _SDT_ARG_CONSTRAINT_STRING(x) _SDT_STRINGIFY(x)
+/* _SDT_S encodes the size and type as 0xSSTT which is decoded by the assembler
+ macros _SDT_SIZE and _SDT_TYPE */
+# define _SDT_ARG(n, x) \
+ [_SDT_S##n] "n" ((_SDT_ARGSIGNED (x) ? (int)-1 : 1) * (-(((int) _SDT_ARGSIZE (x)) << 8) + (-(0x7f & __builtin_classify_type (x))))), \
+ [_SDT_A##n] _SDT_ARG_CONSTRAINT_STRING (STAP_SDT_ARG_CONSTRAINT) (_SDT_ARGVAL (x))
+#endif
+#define _SDT_ASM_STRING(x) _SDT_ASM_STRING_1(x)
+#define _SDT_ASM_SUBSTR(x) _SDT_ASM_SUBSTR_1(x)
+
+#define _SDT_ARGARRAY(x) (__builtin_classify_type (x) == 14 \
+ || __builtin_classify_type (x) == 5)
+
+#ifdef __cplusplus
+# define _SDT_ARGSIGNED(x) (!_SDT_ARGARRAY (x) \
+ && __sdt_type<__typeof (x)>::__sdt_signed)
+# define _SDT_ARGSIZE(x) (_SDT_ARGARRAY (x) \
+ ? sizeof (void *) : sizeof (x))
+# define _SDT_ARGVAL(x) (x)
+
+# include <cstddef>
+
+template<typename __sdt_T>
+struct __sdt_type
+{
+ static const bool __sdt_signed = false;
+};
+
+#define __SDT_ALWAYS_SIGNED(T) \
+template<> struct __sdt_type<T> { static const bool __sdt_signed = true; };
+#define __SDT_COND_SIGNED(T,CT) \
+template<> struct __sdt_type<T> { static const bool __sdt_signed = ((CT)(-1) < 1); };
+__SDT_ALWAYS_SIGNED(signed char)
+__SDT_ALWAYS_SIGNED(short)
+__SDT_ALWAYS_SIGNED(int)
+__SDT_ALWAYS_SIGNED(long)
+__SDT_ALWAYS_SIGNED(long long)
+__SDT_ALWAYS_SIGNED(volatile signed char)
+__SDT_ALWAYS_SIGNED(volatile short)
+__SDT_ALWAYS_SIGNED(volatile int)
+__SDT_ALWAYS_SIGNED(volatile long)
+__SDT_ALWAYS_SIGNED(volatile long long)
+__SDT_ALWAYS_SIGNED(const signed char)
+__SDT_ALWAYS_SIGNED(const short)
+__SDT_ALWAYS_SIGNED(const int)
+__SDT_ALWAYS_SIGNED(const long)
+__SDT_ALWAYS_SIGNED(const long long)
+__SDT_ALWAYS_SIGNED(const volatile signed char)
+__SDT_ALWAYS_SIGNED(const volatile short)
+__SDT_ALWAYS_SIGNED(const volatile int)
+__SDT_ALWAYS_SIGNED(const volatile long)
+__SDT_ALWAYS_SIGNED(const volatile long long)
+__SDT_COND_SIGNED(char, char)
+__SDT_COND_SIGNED(wchar_t, wchar_t)
+__SDT_COND_SIGNED(volatile char, char)
+__SDT_COND_SIGNED(volatile wchar_t, wchar_t)
+__SDT_COND_SIGNED(const char, char)
+__SDT_COND_SIGNED(const wchar_t, wchar_t)
+__SDT_COND_SIGNED(const volatile char, char)
+__SDT_COND_SIGNED(const volatile wchar_t, wchar_t)
+#if defined (__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
+/* __SDT_COND_SIGNED(char16_t) */
+/* __SDT_COND_SIGNED(char32_t) */
+#endif
+
+template<typename __sdt_E>
+struct __sdt_type<__sdt_E[]> : public __sdt_type<__sdt_E *> {};
+
+template<typename __sdt_E, size_t __sdt_N>
+struct __sdt_type<__sdt_E[__sdt_N]> : public __sdt_type<__sdt_E *> {};
+
+#elif !defined(__ASSEMBLER__)
+__extension__ extern unsigned long long __sdt_unsp;
+# define _SDT_ARGINTTYPE(x) \
+ __typeof (__builtin_choose_expr (((__builtin_classify_type (x) \
+ + 3) & -4) == 4, (x), 0U))
+# define _SDT_ARGSIGNED(x) \
+ (!__extension__ \
+ (__builtin_constant_p ((((unsigned long long) \
+ (_SDT_ARGINTTYPE (x)) __sdt_unsp) \
+ & ((unsigned long long)1 << (sizeof (unsigned long long) \
+ * __CHAR_BIT__ - 1))) == 0) \
+ || (_SDT_ARGINTTYPE (x)) -1 > (_SDT_ARGINTTYPE (x)) 0))
+# define _SDT_ARGSIZE(x) \
+ (_SDT_ARGARRAY (x) ? sizeof (void *) : sizeof (x))
+# define _SDT_ARGVAL(x) (x)
+#endif
+
+#if defined __powerpc__ || defined __powerpc64__
+# define _SDT_ARGTMPL(id) %I[id]%[id]
+#elif defined __i386__
+# define _SDT_ARGTMPL(id) %k[id] /* gcc.gnu.org/PR80115 sourceware.org/PR24541 */
+#else
+# define _SDT_ARGTMPL(id) %[id]
+#endif
+
+/* NB: gdb PR24541 highlighted an unspecified corner of the sdt.h
+ operand note format.
+
+ The named register may be a longer or shorter (!) alias for the
+ storage where the value in question is found. For example, on
+   i386, a 64-bit value may be put in register pairs, and the register
+ name stored would identify just one of them. Previously, gcc was
+ asked to emit the %w[id] (16-bit alias of some registers holding
+ operands), even when a wider 32-bit value was used.
+
+ Bottom line: the byte-width given before the @ sign governs. If
+ there is a mismatch between that width and that of the named
+ register, then a sys/sdt.h note consumer may need to employ
+ architecture-specific heuristics to figure out where the compiler
+ has actually put the complete value.
+*/
+
+#ifdef __LP64__
+# define _SDT_ASM_ADDR .8byte
+#else
+# define _SDT_ASM_ADDR .4byte
+#endif
+
+/* The ia64 and s390 nop instructions take an argument. */
+#if defined(__ia64__) || defined(__s390__) || defined(__s390x__)
+#define _SDT_NOP nop 0
+#else
+#define _SDT_NOP nop
+#endif
+
+#define _SDT_NOTE_NAME "stapsdt"
+#define _SDT_NOTE_TYPE 3
+
+/* If the assembler supports the necessary feature, then we can play
+ nice with code in COMDAT sections, which comes up in C++ code.
+ Without that assembler support, some combinations of probe placements
+ in certain kinds of C++ code may produce link-time errors. */
+#include "sdt-config.h"
+#if _SDT_ASM_SECTION_AUTOGROUP_SUPPORT
+# define _SDT_ASM_AUTOGROUP "?"
+#else
+# define _SDT_ASM_AUTOGROUP ""
+#endif
+
+#define _SDT_DEF_MACROS \
+ _SDT_ASM_1(.altmacro) \
+ _SDT_ASM_1(.macro _SDT_SIGN x) \
+ _SDT_ASM_3(.pushsection .note.stapsdt,"","note") \
+ _SDT_ASM_1(.iflt \\x) \
+ _SDT_ASM_1(.ascii "-") \
+ _SDT_ASM_1(.endif) \
+ _SDT_ASM_1(.popsection) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_SIZE_ x) \
+ _SDT_ASM_3(.pushsection .note.stapsdt,"","note") \
+ _SDT_ASM_1(.ascii "\x") \
+ _SDT_ASM_1(.popsection) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_SIZE x) \
+ _SDT_ASM_1(_SDT_SIZE_ %%((-(-\\x*((-\\x>0)-(-\\x<0))))>>8)) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_TYPE_ x) \
+ _SDT_ASM_3(.pushsection .note.stapsdt,"","note") \
+ _SDT_ASM_2(.ifc 8,\\x) \
+ _SDT_ASM_1(.ascii "f") \
+ _SDT_ASM_1(.endif) \
+ _SDT_ASM_1(.ascii "@") \
+ _SDT_ASM_1(.popsection) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_TYPE x) \
+ _SDT_ASM_1(_SDT_TYPE_ %%((\\x)&(0xff))) \
+ _SDT_ASM_1(.endm)
+
+#define _SDT_UNDEF_MACROS \
+ _SDT_ASM_1(.purgem _SDT_SIGN) \
+ _SDT_ASM_1(.purgem _SDT_SIZE_) \
+ _SDT_ASM_1(.purgem _SDT_SIZE) \
+ _SDT_ASM_1(.purgem _SDT_TYPE_) \
+ _SDT_ASM_1(.purgem _SDT_TYPE)
+
+#define _SDT_ASM_BODY(provider, name, pack_args, args, ...) \
+ _SDT_DEF_MACROS \
+ _SDT_ASM_1(990: _SDT_NOP) \
+ _SDT_ASM_3( .pushsection .note.stapsdt,_SDT_ASM_AUTOGROUP,"note") \
+ _SDT_ASM_1( .balign 4) \
+ _SDT_ASM_3( .4byte 992f-991f, 994f-993f, _SDT_NOTE_TYPE) \
+ _SDT_ASM_1(991: .asciz _SDT_NOTE_NAME) \
+ _SDT_ASM_1(992: .balign 4) \
+ _SDT_ASM_1(993: _SDT_ASM_ADDR 990b) \
+ _SDT_ASM_1( _SDT_ASM_ADDR _.stapsdt.base) \
+ _SDT_SEMAPHORE(provider,name) \
+ _SDT_ASM_STRING(provider) \
+ _SDT_ASM_STRING(name) \
+ pack_args args \
+ _SDT_ASM_SUBSTR(\x00) \
+ _SDT_UNDEF_MACROS \
+ _SDT_ASM_1(994: .balign 4) \
+ _SDT_ASM_1( .popsection)
+
+#define _SDT_ASM_BASE \
+ _SDT_ASM_1(.ifndef _.stapsdt.base) \
+ _SDT_ASM_5( .pushsection .stapsdt.base,"aG","progbits", \
+ .stapsdt.base,comdat) \
+ _SDT_ASM_1( .weak _.stapsdt.base) \
+ _SDT_ASM_1( .hidden _.stapsdt.base) \
+ _SDT_ASM_1( _.stapsdt.base: .space 1) \
+ _SDT_ASM_2( .size _.stapsdt.base, 1) \
+ _SDT_ASM_1( .popsection) \
+ _SDT_ASM_1(.endif)
+
+#if defined _SDT_HAS_SEMAPHORES
+#define _SDT_SEMAPHORE(p,n) \
+ _SDT_ASM_1( _SDT_ASM_ADDR p##_##n##_semaphore)
+#else
+#define _SDT_SEMAPHORE(p,n) _SDT_ASM_1( _SDT_ASM_ADDR 0)
+#endif
+
+#define _SDT_ASM_BLANK _SDT_ASM_SUBSTR(\x20)
+#define _SDT_ASM_TEMPLATE_0 /* no arguments */
+#define _SDT_ASM_TEMPLATE_1 _SDT_ARGFMT(1)
+#define _SDT_ASM_TEMPLATE_2 _SDT_ASM_TEMPLATE_1 _SDT_ASM_BLANK _SDT_ARGFMT(2)
+#define _SDT_ASM_TEMPLATE_3 _SDT_ASM_TEMPLATE_2 _SDT_ASM_BLANK _SDT_ARGFMT(3)
+#define _SDT_ASM_TEMPLATE_4 _SDT_ASM_TEMPLATE_3 _SDT_ASM_BLANK _SDT_ARGFMT(4)
+#define _SDT_ASM_TEMPLATE_5 _SDT_ASM_TEMPLATE_4 _SDT_ASM_BLANK _SDT_ARGFMT(5)
+#define _SDT_ASM_TEMPLATE_6 _SDT_ASM_TEMPLATE_5 _SDT_ASM_BLANK _SDT_ARGFMT(6)
+#define _SDT_ASM_TEMPLATE_7 _SDT_ASM_TEMPLATE_6 _SDT_ASM_BLANK _SDT_ARGFMT(7)
+#define _SDT_ASM_TEMPLATE_8 _SDT_ASM_TEMPLATE_7 _SDT_ASM_BLANK _SDT_ARGFMT(8)
+#define _SDT_ASM_TEMPLATE_9 _SDT_ASM_TEMPLATE_8 _SDT_ASM_BLANK _SDT_ARGFMT(9)
+#define _SDT_ASM_TEMPLATE_10 _SDT_ASM_TEMPLATE_9 _SDT_ASM_BLANK _SDT_ARGFMT(10)
+#define _SDT_ASM_TEMPLATE_11 _SDT_ASM_TEMPLATE_10 _SDT_ASM_BLANK _SDT_ARGFMT(11)
+#define _SDT_ASM_TEMPLATE_12 _SDT_ASM_TEMPLATE_11 _SDT_ASM_BLANK _SDT_ARGFMT(12)
+#define _SDT_ASM_OPERANDS_0() [__sdt_dummy] "g" (0)
+#define _SDT_ASM_OPERANDS_1(arg1) _SDT_ARG(1, arg1)
+#define _SDT_ASM_OPERANDS_2(arg1, arg2) \
+ _SDT_ASM_OPERANDS_1(arg1), _SDT_ARG(2, arg2)
+#define _SDT_ASM_OPERANDS_3(arg1, arg2, arg3) \
+ _SDT_ASM_OPERANDS_2(arg1, arg2), _SDT_ARG(3, arg3)
+#define _SDT_ASM_OPERANDS_4(arg1, arg2, arg3, arg4) \
+ _SDT_ASM_OPERANDS_3(arg1, arg2, arg3), _SDT_ARG(4, arg4)
+#define _SDT_ASM_OPERANDS_5(arg1, arg2, arg3, arg4, arg5) \
+ _SDT_ASM_OPERANDS_4(arg1, arg2, arg3, arg4), _SDT_ARG(5, arg5)
+#define _SDT_ASM_OPERANDS_6(arg1, arg2, arg3, arg4, arg5, arg6) \
+ _SDT_ASM_OPERANDS_5(arg1, arg2, arg3, arg4, arg5), _SDT_ARG(6, arg6)
+#define _SDT_ASM_OPERANDS_7(arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
+ _SDT_ASM_OPERANDS_6(arg1, arg2, arg3, arg4, arg5, arg6), _SDT_ARG(7, arg7)
+#define _SDT_ASM_OPERANDS_8(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
+ _SDT_ASM_OPERANDS_7(arg1, arg2, arg3, arg4, arg5, arg6, arg7), \
+ _SDT_ARG(8, arg8)
+#define _SDT_ASM_OPERANDS_9(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9) \
+ _SDT_ASM_OPERANDS_8(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8), \
+ _SDT_ARG(9, arg9)
+#define _SDT_ASM_OPERANDS_10(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10) \
+ _SDT_ASM_OPERANDS_9(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9), \
+ _SDT_ARG(10, arg10)
+#define _SDT_ASM_OPERANDS_11(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
+ _SDT_ASM_OPERANDS_10(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10), \
+ _SDT_ARG(11, arg11)
+#define _SDT_ASM_OPERANDS_12(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12) \
+ _SDT_ASM_OPERANDS_11(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11), \
+ _SDT_ARG(12, arg12)
+
+/* These macros can be used in C, C++, or assembly code.
+ In assembly code the arguments should use normal assembly operand syntax. */
+
+#define STAP_PROBE(provider, name) \
+ _SDT_PROBE(provider, name, 0, ())
+#define STAP_PROBE1(provider, name, arg1) \
+ _SDT_PROBE(provider, name, 1, (arg1))
+#define STAP_PROBE2(provider, name, arg1, arg2) \
+ _SDT_PROBE(provider, name, 2, (arg1, arg2))
+#define STAP_PROBE3(provider, name, arg1, arg2, arg3) \
+ _SDT_PROBE(provider, name, 3, (arg1, arg2, arg3))
+#define STAP_PROBE4(provider, name, arg1, arg2, arg3, arg4) \
+ _SDT_PROBE(provider, name, 4, (arg1, arg2, arg3, arg4))
+#define STAP_PROBE5(provider, name, arg1, arg2, arg3, arg4, arg5) \
+ _SDT_PROBE(provider, name, 5, (arg1, arg2, arg3, arg4, arg5))
+#define STAP_PROBE6(provider, name, arg1, arg2, arg3, arg4, arg5, arg6) \
+ _SDT_PROBE(provider, name, 6, (arg1, arg2, arg3, arg4, arg5, arg6))
+#define STAP_PROBE7(provider, name, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
+ _SDT_PROBE(provider, name, 7, (arg1, arg2, arg3, arg4, arg5, arg6, arg7))
+#define STAP_PROBE8(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8) \
+ _SDT_PROBE(provider, name, 8, (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8))
+#define STAP_PROBE9(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9)\
+ _SDT_PROBE(provider, name, 9, (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9))
+#define STAP_PROBE10(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10) \
+ _SDT_PROBE(provider, name, 10, \
+ (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10))
+#define STAP_PROBE11(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
+ _SDT_PROBE(provider, name, 11, \
+ (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11))
+#define STAP_PROBE12(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12) \
+ _SDT_PROBE(provider, name, 12, \
+ (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12))
+
+/* This STAP_PROBEV macro can be used in variadic scenarios, where the
+ number of probe arguments is not known until compile time. Since
+ variadic macro support may vary with compiler options, you must
+ pre-#define SDT_USE_VARIADIC to enable this type of probe.
+
+ The trick to count __VA_ARGS__ was inspired by this post by
+ Laurent Deniau <laurent.deniau@cern.ch>:
+ http://groups.google.com/group/comp.std.c/msg/346fc464319b1ee5
+
+ Note that our _SDT_NARG is called with an extra 0 arg that's not
+ counted, so we don't have to worry about the behavior of macros
+ called without any arguments. */
+
+#define _SDT_NARG(...) __SDT_NARG(__VA_ARGS__, 12,11,10,9,8,7,6,5,4,3,2,1,0)
+#define __SDT_NARG(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12, N, ...) N
+#ifdef SDT_USE_VARIADIC
+#define _SDT_PROBE_N(provider, name, N, ...) \
+ _SDT_PROBE(provider, name, N, (__VA_ARGS__))
+#define STAP_PROBEV(provider, name, ...) \
+ _SDT_PROBE_N(provider, name, _SDT_NARG(0, ##__VA_ARGS__), ##__VA_ARGS__)
+#endif
+
+/* These macros are for use in asm statements. You must compile
+ with -std=gnu99 or -std=c99 to use the STAP_PROBE_ASM macro.
+
+ The STAP_PROBE_ASM macro generates a quoted string to be used in the
+ template portion of the asm statement, concatenated with strings that
+ contain the actual assembly code around the probe site.
+
+ For example:
+
+ asm ("before\n"
+ STAP_PROBE_ASM(provider, fooprobe, %eax 4(%esi))
+ "after");
+
+ emits the assembly code for "before\nafter", with a probe in between.
+ The probe arguments are the %eax register, and the value of the memory
+ word located 4 bytes past the address in the %esi register. Note that
+ because this is a simple asm, not a GNU C extended asm statement, these
+ % characters do not need to be doubled to generate literal %reg names.
+
+ In a GNU C extended asm statement, the probe arguments can be specified
+ using the macro STAP_PROBE_ASM_TEMPLATE(n) for n arguments. The paired
+ macro STAP_PROBE_ASM_OPERANDS gives the C values of these probe arguments,
+ and appears in the input operand list of the asm statement. For example:
+
+ asm ("someinsn %0,%1\n" // %0 is output operand, %1 is input operand
+ STAP_PROBE_ASM(provider, fooprobe, STAP_PROBE_ASM_TEMPLATE(3))
+ "otherinsn %[namedarg]"
+ : "r" (outvar)
+ : "g" (some_value), [namedarg] "i" (1234),
+ STAP_PROBE_ASM_OPERANDS(3, some_value, some_ptr->field, 1234));
+
+ This is just like writing:
+
+        STAP_PROBE3(provider, fooprobe, some_value, some_ptr->field, 1234);
+
+ but the probe site is right between "someinsn" and "otherinsn".
+
+ The probe arguments in STAP_PROBE_ASM can be given as assembly
+ operands instead, even inside a GNU C extended asm statement.
+ Note that these can use operand templates like %0 or %[name],
+ and likewise they must write %%reg for a literal operand of %reg. */
+
+#define _SDT_ASM_BODY_1(p,n,...) _SDT_ASM_BODY(p,n,_SDT_ASM_SUBSTR,(__VA_ARGS__))
+#define _SDT_ASM_BODY_2(p,n,...) _SDT_ASM_BODY(p,n,/*_SDT_ASM_STRING */,__VA_ARGS__)
+#define _SDT_ASM_BODY_N2(p,n,no,...) _SDT_ASM_BODY_ ## no(p,n,__VA_ARGS__)
+#define _SDT_ASM_BODY_N1(p,n,no,...) _SDT_ASM_BODY_N2(p,n,no,__VA_ARGS__)
+#define _SDT_ASM_BODY_N(p,n,...) _SDT_ASM_BODY_N1(p,n,_SDT_NARG(0, __VA_ARGS__),__VA_ARGS__)
+
+#if __STDC_VERSION__ >= 199901L
+# define STAP_PROBE_ASM(provider, name, ...) \
+ _SDT_ASM_BODY_N(provider, name, __VA_ARGS__) \
+ _SDT_ASM_BASE
+# define STAP_PROBE_ASM_OPERANDS(n, ...) _SDT_ASM_OPERANDS_##n(__VA_ARGS__)
+#else
+# define STAP_PROBE_ASM(provider, name, args) \
+ _SDT_ASM_BODY(provider, name, /* _SDT_ASM_STRING */, (args)) \
+ _SDT_ASM_BASE
+#endif
+#define STAP_PROBE_ASM_TEMPLATE(n) _SDT_ASM_TEMPLATE_##n,"use _SDT_ASM_TEMPLATE_"
+
+
+/* DTrace compatible macro names. */
+#define DTRACE_PROBE(provider,probe) \
+ STAP_PROBE(provider,probe)
+#define DTRACE_PROBE1(provider,probe,parm1) \
+ STAP_PROBE1(provider,probe,parm1)
+#define DTRACE_PROBE2(provider,probe,parm1,parm2) \
+ STAP_PROBE2(provider,probe,parm1,parm2)
+#define DTRACE_PROBE3(provider,probe,parm1,parm2,parm3) \
+ STAP_PROBE3(provider,probe,parm1,parm2,parm3)
+#define DTRACE_PROBE4(provider,probe,parm1,parm2,parm3,parm4) \
+ STAP_PROBE4(provider,probe,parm1,parm2,parm3,parm4)
+#define DTRACE_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5) \
+ STAP_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5)
+#define DTRACE_PROBE6(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6) \
+ STAP_PROBE6(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6)
+#define DTRACE_PROBE7(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7) \
+ STAP_PROBE7(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7)
+#define DTRACE_PROBE8(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8) \
+ STAP_PROBE8(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8)
+#define DTRACE_PROBE9(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9) \
+ STAP_PROBE9(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9)
+#define DTRACE_PROBE10(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10) \
+ STAP_PROBE10(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10)
+#define DTRACE_PROBE11(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11) \
+ STAP_PROBE11(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11)
+#define DTRACE_PROBE12(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11,parm12) \
+ STAP_PROBE12(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11,parm12)
+
+
+#endif /* sys/sdt.h */
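A minimal consumer of this header, illustrative rather than taken from the series, showing what a probe site compiles down to:

/* Illustrative sketch: one probe with two integer operands.  Compiling this
 * emits a single nop at the probe site plus a .note.stapsdt entry that
 * records the size and location of "a" and "sum".
 */
#include "sdt.h"

int compute(int a, int b)
{
        int sum = a + b;

        STAP_PROBE2(myapp, compute_done, a, sum);  /* provider "myapp", probe "compute_done" */
        return sum;
}

Running readelf -n on the resulting binary lists the probe under a "stapsdt" note, which is the metadata USDT attachers such as libbpf parse; DTRACE_PROBE2(myapp, compute_done, a, sum) would expand to the same thing via the compatibility macros at the end of the header.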
diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
index 6bf21e47882a..c0e7acd698ed 100755
--- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py
+++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
@@ -180,7 +180,7 @@ class FileExtractor(object):
@enum_name: name of the enum to parse
"""
start_marker = re.compile(f'enum {enum_name} {{\n')
- pattern = re.compile('^\s*(BPF_\w+),?$')
+ pattern = re.compile('^\s*(BPF_\w+),?(\s+/\*.*\*/)?$')
end_marker = re.compile('^};')
parser = BlockParser(self.reader)
parser.search_block(start_marker)
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index d6a1be4d8020..0861ea60dcdd 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -6,7 +6,7 @@
#include <stdlib.h>
#include <sys/sysinfo.h>
-#include "bpf_rlimit.h"
+#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
@@ -44,13 +44,16 @@ int main(int argc, char **argv)
unsigned long long *percpu_value;
int cpu, nproc;
- nproc = get_nprocs_conf();
+ nproc = bpf_num_possible_cpus();
percpu_value = malloc(sizeof(*percpu_value) * nproc);
if (!percpu_value) {
printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
goto err;
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key),
sizeof(value), 0, NULL);
if (map_fd < 0) {
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index c299d3452695..7886265846a0 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -15,7 +15,6 @@
#include "cgroup_helpers.h"
#include "testing_helpers.h"
-#include "bpf_rlimit.h"
#define DEV_CGROUP_PROG "./dev_cgroup.o"
@@ -28,6 +27,9 @@ int main(int argc, char **argv)
int prog_fd, cgroup_fd;
__u32 prog_cnt;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index aa294612e0a7..c028d621c744 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -26,7 +26,6 @@
#include <bpf/bpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
struct tlpm_node {
struct tlpm_node *next;
@@ -409,16 +408,13 @@ static void test_lpm_ipaddr(void)
/* Test some lookups that should not match any entry */
inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
- assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -ENOENT);
close(map_fd_ipv4);
close(map_fd_ipv6);
@@ -475,18 +471,15 @@ static void test_lpm_delete(void)
/* remove non-existent node */
key->prefixlen = 32;
inet_pton(AF_INET, "10.0.0.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
key->prefixlen = 30; // unused prefix so far
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
key->prefixlen = 16; // same prefix as the root node
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
/* assert initial lookup */
key->prefixlen = 32;
@@ -531,8 +524,7 @@ static void test_lpm_delete(void)
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.128.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
close(map_fd);
}
@@ -553,8 +545,7 @@ static void test_lpm_get_next_key(void)
assert(map_fd >= 0);
/* empty tree. get_next_key should return ENOENT */
- assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -ENOENT);
/* get and verify the first key, get the second one should fail. */
key_p->prefixlen = 16;
@@ -566,8 +557,7 @@ static void test_lpm_get_next_key(void)
assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
key_p->data[1] == 168);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should get the first one in post order. */
key_p->prefixlen = 8;
@@ -591,8 +581,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total three) */
key_p->prefixlen = 24;
@@ -615,8 +604,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total four) */
key_p->prefixlen = 24;
@@ -644,8 +632,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total five) */
key_p->prefixlen = 28;
@@ -679,8 +666,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should return the first one in post order */
key_p->prefixlen = 22;
@@ -791,6 +777,9 @@ int main(void)
/* we want predictable, pseudo random tests */
srand(0xf00ba1);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
test_lpm_basic();
test_lpm_order();
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 563bbe18c172..4d0650cfb5cd 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -18,7 +18,6 @@
#include <bpf/libbpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"
#define LOCAL_FREE_TARGET (128)
@@ -176,24 +175,20 @@ static void test_lru_sanity0(int map_type, int map_flags)
BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
- errno == EINVAL);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -EINVAL);
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
@@ -201,8 +196,7 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and mark the ref bit to
* stop LRU from removing key=1
@@ -218,8 +212,7 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* lookup elem key=1 and delete it, then check it doesn't exist */
key = 1;
@@ -382,8 +375,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
end_key = 1 + batch_size;
value[0] = 4321;
for (key = 1; key < end_key; key++) {
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
@@ -563,8 +555,7 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));
/* Cannot find the last key because it was removed by LRU */
- assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
}
/* Test map with only one element */
@@ -712,21 +703,18 @@ static void test_lru_sanity7(int map_type, int map_flags)
BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
@@ -734,8 +722,7 @@ static void test_lru_sanity7(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and mark the ref bit to
* stop LRU from removing key=1
@@ -758,8 +745,7 @@ static void test_lru_sanity7(int map_type, int map_flags)
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -806,21 +792,18 @@ static void test_lru_sanity8(int map_type, int map_flags)
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
@@ -830,8 +813,7 @@ static void test_lru_sanity8(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and do _not_ mark ref bit.
* this will be evicted on next update.
@@ -854,8 +836,7 @@ static void test_lru_sanity8(int map_type, int map_flags)
/* key=1 has been removed from the LRU */
key = 1;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -878,6 +859,9 @@ int main(int argc, char **argv)
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index edaffd43da83..6cd6ef9fc20b 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -184,7 +184,7 @@ def bpftool_prog_list(expected=None, ns=""):
def bpftool_map_list(expected=None, ns=""):
_, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
# Remove the base maps
- maps = [m for m in maps if m not in base_maps and m.get('name') not in base_map_names]
+ maps = [m for m in maps if m not in base_maps and m.get('name') and m.get('name') not in base_map_names]
if expected is not None:
if len(maps) != expected:
fail(True, "%d BPF maps loaded, expected %d" %
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 2ecb73a65206..c536d1d29d57 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -3,6 +3,7 @@
*/
#define _GNU_SOURCE
#include "test_progs.h"
+#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
@@ -50,19 +51,8 @@ struct prog_test_def {
int test_num;
void (*run_test)(void);
void (*run_serial_test)(void);
- bool force_log;
- int error_cnt;
- int skip_cnt;
- int sub_succ_cnt;
bool should_run;
- bool tested;
bool need_cgroup_cleanup;
-
- char *subtest_name;
- int subtest_num;
-
- /* store counts before subtest started */
- int old_error_cnt;
};
/* Override C runtime library's usleep() implementation to ensure nanosleep()
@@ -84,12 +74,13 @@ static bool should_run(struct test_selector *sel, int num, const char *name)
int i;
for (i = 0; i < sel->blacklist.cnt; i++) {
- if (glob_match(name, sel->blacklist.strs[i]))
+ if (glob_match(name, sel->blacklist.tests[i].name) &&
+ !sel->blacklist.tests[i].subtest_cnt)
return false;
}
for (i = 0; i < sel->whitelist.cnt; i++) {
- if (glob_match(name, sel->whitelist.strs[i]))
+ if (glob_match(name, sel->whitelist.tests[i].name))
return true;
}
@@ -99,32 +90,69 @@ static bool should_run(struct test_selector *sel, int num, const char *name)
return num < sel->num_set_len && sel->num_set[num];
}
-static void dump_test_log(const struct prog_test_def *test, bool failed)
+static bool should_run_subtest(struct test_selector *sel,
+ struct test_selector *subtest_sel,
+ int subtest_num,
+ const char *test_name,
+ const char *subtest_name)
{
- if (stdout == env.stdout)
- return;
+ int i, j;
- /* worker always holds log */
- if (env.worker_id != -1)
- return;
+ for (i = 0; i < sel->blacklist.cnt; i++) {
+ if (glob_match(test_name, sel->blacklist.tests[i].name)) {
+ if (!sel->blacklist.tests[i].subtest_cnt)
+ return false;
+
+ for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
+ if (glob_match(subtest_name,
+ sel->blacklist.tests[i].subtests[j]))
+ return false;
+ }
+ }
+ }
- fflush(stdout); /* exports env.log_buf & env.log_cnt */
+ for (i = 0; i < sel->whitelist.cnt; i++) {
+ if (glob_match(test_name, sel->whitelist.tests[i].name)) {
+ if (!sel->whitelist.tests[i].subtest_cnt)
+ return true;
- if (env.verbosity > VERBOSE_NONE || test->force_log || failed) {
- if (env.log_cnt) {
- env.log_buf[env.log_cnt] = '\0';
- fprintf(env.stdout, "%s", env.log_buf);
- if (env.log_buf[env.log_cnt - 1] != '\n')
- fprintf(env.stdout, "\n");
+ for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
+ if (glob_match(subtest_name,
+ sel->whitelist.tests[i].subtests[j]))
+ return true;
+ }
}
}
+
+ if (!sel->whitelist.cnt && !subtest_sel->num_set)
+ return true;
+
+ return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}
-static void skip_account(void)
+static void dump_test_log(const struct prog_test_def *test,
+ const struct test_state *test_state,
+ bool force_failed)
{
- if (env.test->skip_cnt) {
- env.skip_cnt++;
- env.test->skip_cnt = 0;
+ bool failed = test_state->error_cnt > 0 || force_failed;
+
+ /* worker always holds log */
+ if (env.worker_id != -1)
+ return;
+
+ fflush(stdout); /* exports test_state->log_buf & test_state->log_cnt */
+
+ fprintf(env.stdout, "#%-3d %s:%s\n",
+ test->test_num, test->test_name,
+ failed ? "FAIL" : (test_state->skip_cnt ? "SKIP" : "OK"));
+
+ if (env.verbosity > VERBOSE_NONE || test_state->force_log || failed) {
+ if (test_state->log_cnt) {
+ test_state->log_buf[test_state->log_cnt] = '\0';
+ fprintf(env.stdout, "%s", test_state->log_buf);
+ if (test_state->log_buf[test_state->log_cnt - 1] != '\n')
+ fprintf(env.stdout, "\n");
+ }
}
}
@@ -135,7 +163,6 @@ static void stdio_restore(void);
*/
static void reset_affinity(void)
{
-
cpu_set_t cpuset;
int i, err;
@@ -178,68 +205,78 @@ static void restore_netns(void)
void test__end_subtest(void)
{
struct prog_test_def *test = env.test;
- int sub_error_cnt = test->error_cnt - test->old_error_cnt;
-
- dump_test_log(test, sub_error_cnt);
+ struct test_state *state = env.test_state;
+ int sub_error_cnt = state->error_cnt - state->old_error_cnt;
fprintf(stdout, "#%d/%d %s/%s:%s\n",
- test->test_num, test->subtest_num, test->test_name, test->subtest_name,
- sub_error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
+ test->test_num, state->subtest_num, test->test_name, state->subtest_name,
+ sub_error_cnt ? "FAIL" : (state->subtest_skip_cnt ? "SKIP" : "OK"));
- if (sub_error_cnt)
- test->error_cnt++;
- else if (test->skip_cnt == 0)
- test->sub_succ_cnt++;
- skip_account();
+ if (sub_error_cnt == 0) {
+ if (state->subtest_skip_cnt == 0) {
+ state->sub_succ_cnt++;
+ } else {
+ state->subtest_skip_cnt = 0;
+ state->skip_cnt++;
+ }
+ }
- free(test->subtest_name);
- test->subtest_name = NULL;
+ free(state->subtest_name);
+ state->subtest_name = NULL;
}
-bool test__start_subtest(const char *name)
+bool test__start_subtest(const char *subtest_name)
{
struct prog_test_def *test = env.test;
+ struct test_state *state = env.test_state;
- if (test->subtest_name)
+ if (state->subtest_name)
test__end_subtest();
- test->subtest_num++;
+ state->subtest_num++;
- if (!name || !name[0]) {
+ if (!subtest_name || !subtest_name[0]) {
fprintf(env.stderr,
"Subtest #%d didn't provide sub-test name!\n",
- test->subtest_num);
+ state->subtest_num);
return false;
}
- if (!should_run(&env.subtest_selector, test->subtest_num, name))
+ if (!should_run_subtest(&env.test_selector,
+ &env.subtest_selector,
+ state->subtest_num,
+ test->test_name,
+ subtest_name))
return false;
- test->subtest_name = strdup(name);
- if (!test->subtest_name) {
+ state->subtest_name = strdup(subtest_name);
+ if (!state->subtest_name) {
fprintf(env.stderr,
"Subtest #%d: failed to copy subtest name!\n",
- test->subtest_num);
+ state->subtest_num);
return false;
}
- env.test->old_error_cnt = env.test->error_cnt;
+ state->old_error_cnt = state->error_cnt;
return true;
}
void test__force_log(void)
{
- env.test->force_log = true;
+ env.test_state->force_log = true;
}
void test__skip(void)
{
- env.test->skip_cnt++;
+ if (env.test_state->subtest_name)
+ env.test_state->subtest_skip_cnt++;
+ else
+ env.test_state->skip_cnt++;
}
void test__fail(void)
{
- env.test->error_cnt++;
+ env.test_state->error_cnt++;
}
int test__join_cgroup(const char *path)
@@ -472,8 +509,11 @@ static struct prog_test_def prog_test_defs[] = {
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};
+
static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
+static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
+
const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] = "BPF selftests test runner";
@@ -527,63 +567,29 @@ static int libbpf_print_fn(enum libbpf_print_level level,
return 0;
}
-static void free_str_set(const struct str_set *set)
+static void free_test_filter_set(const struct test_filter_set *set)
{
- int i;
+ int i, j;
if (!set)
return;
- for (i = 0; i < set->cnt; i++)
- free((void *)set->strs[i]);
- free(set->strs);
-}
-
-static int parse_str_list(const char *s, struct str_set *set, bool is_glob_pattern)
-{
- char *input, *state = NULL, *next, **tmp, **strs = NULL;
- int i, cnt = 0;
-
- input = strdup(s);
- if (!input)
- return -ENOMEM;
-
- while ((next = strtok_r(state ? NULL : input, ",", &state))) {
- tmp = realloc(strs, sizeof(*strs) * (cnt + 1));
- if (!tmp)
- goto err;
- strs = tmp;
-
- if (is_glob_pattern) {
- strs[cnt] = strdup(next);
- if (!strs[cnt])
- goto err;
- } else {
- strs[cnt] = malloc(strlen(next) + 2 + 1);
- if (!strs[cnt])
- goto err;
- sprintf(strs[cnt], "*%s*", next);
- }
+ for (i = 0; i < set->cnt; i++) {
+ free((void *)set->tests[i].name);
+ for (j = 0; j < set->tests[i].subtest_cnt; j++)
+ free((void *)set->tests[i].subtests[j]);
- cnt++;
+ free((void *)set->tests[i].subtests);
}
- tmp = realloc(set->strs, sizeof(*strs) * (cnt + set->cnt));
- if (!tmp)
- goto err;
- memcpy(tmp + set->cnt, strs, sizeof(*strs) * cnt);
- set->strs = (const char **)tmp;
- set->cnt += cnt;
+ free((void *)set->tests);
+}
- free(input);
- free(strs);
- return 0;
-err:
- for (i = 0; i < cnt; i++)
- free(strs[i]);
- free(strs);
- free(input);
- return -ENOMEM;
+static void free_test_selector(struct test_selector *test_selector)
+{
+ free_test_filter_set(&test_selector->blacklist);
+ free_test_filter_set(&test_selector->whitelist);
+ free(test_selector->num_set);
}
extern int extra_prog_load_log_flags;
@@ -615,33 +621,17 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
case ARG_TEST_NAME_GLOB_ALLOWLIST:
case ARG_TEST_NAME: {
- char *subtest_str = strchr(arg, '/');
-
- if (subtest_str) {
- *subtest_str = '\0';
- if (parse_str_list(subtest_str + 1,
- &env->subtest_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
- return -ENOMEM;
- }
- if (parse_str_list(arg, &env->test_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
+ if (parse_test_list(arg,
+ &env->test_selector.whitelist,
+ key == ARG_TEST_NAME_GLOB_ALLOWLIST))
return -ENOMEM;
break;
}
case ARG_TEST_NAME_GLOB_DENYLIST:
case ARG_TEST_NAME_BLACKLIST: {
- char *subtest_str = strchr(arg, '/');
-
- if (subtest_str) {
- *subtest_str = '\0';
- if (parse_str_list(subtest_str + 1,
- &env->subtest_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
- return -ENOMEM;
- }
- if (parse_str_list(arg, &env->test_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
+ if (parse_test_list(arg,
+ &env->test_selector.blacklist,
+ key == ARG_TEST_NAME_GLOB_DENYLIST))
return -ENOMEM;
break;
}
@@ -706,7 +696,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return 0;
}
-static void stdio_hijack(void)
+static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
env.stdout = stdout;
@@ -720,7 +710,7 @@ static void stdio_hijack(void)
/* stdout and stderr -> buffer */
fflush(stdout);
- stdout = open_memstream(&env.log_buf, &env.log_cnt);
+ stdout = open_memstream(log_buf, log_cnt);
if (!stdout) {
stdout = env.stdout;
perror("open_memstream");
@@ -761,8 +751,10 @@ int cd_flavor_subdir(const char *exec_name)
const char *flavor = strrchr(exec_name, '/');
if (!flavor)
- return 0;
- flavor++;
+ flavor = exec_name;
+ else
+ flavor++;
+
flavor = strrchr(flavor, '-');
if (!flavor)
return 0;
@@ -821,7 +813,7 @@ void crash_handler(int signum)
sz = backtrace(bt, ARRAY_SIZE(bt));
if (env.test)
- dump_test_log(env.test, true);
+ dump_test_log(env.test, env.test_state, true);
if (env.stdout)
stdio_restore();
if (env.worker_id != -1)
@@ -843,17 +835,6 @@ static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;
-struct test_result {
- int error_cnt;
- int skip_cnt;
- int sub_succ_cnt;
-
- size_t log_cnt;
- char *log_buf;
-};
-
-static struct test_result test_results[ARRAY_SIZE(prog_test_defs)];
-
static inline const char *str_msg(const struct msg *msg, char *buf)
{
switch (msg->type) {
@@ -907,8 +888,12 @@ static int recv_message(int sock, struct msg *msg)
static void run_one_test(int test_num)
{
struct prog_test_def *test = &prog_test_defs[test_num];
+ struct test_state *state = &test_states[test_num];
env.test = test;
+ env.test_state = state;
+
+ stdio_hijack(&state->log_buf, &state->log_cnt);
if (test->run_test)
test->run_test();
@@ -916,17 +901,19 @@ static void run_one_test(int test_num)
test->run_serial_test();
/* ensure last sub-test is finalized properly */
- if (test->subtest_name)
+ if (state->subtest_name)
test__end_subtest();
- test->tested = true;
+ state->tested = true;
- dump_test_log(test, test->error_cnt);
+ dump_test_log(test, state, false);
reset_affinity();
restore_netns();
if (test->need_cgroup_cleanup)
cleanup_cgroup_environment();
+
+ stdio_restore();
}
struct dispatch_data {
@@ -945,7 +932,7 @@ static void *dispatch_thread(void *ctx)
while (true) {
int test_to_run = -1;
struct prog_test_def *test;
- struct test_result *result;
+ struct test_state *state;
/* grab a test */
{
@@ -992,16 +979,15 @@ static void *dispatch_thread(void *ctx)
if (test_to_run != msg_test_done.test_done.test_num)
goto error;
- test->tested = true;
- result = &test_results[test_to_run];
-
- result->error_cnt = msg_test_done.test_done.error_cnt;
- result->skip_cnt = msg_test_done.test_done.skip_cnt;
- result->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt;
+ state = &test_states[test_to_run];
+ state->tested = true;
+ state->error_cnt = msg_test_done.test_done.error_cnt;
+ state->skip_cnt = msg_test_done.test_done.skip_cnt;
+ state->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt;
/* collect all logs */
if (msg_test_done.test_done.have_log) {
- log_fp = open_memstream(&result->log_buf, &result->log_cnt);
+ log_fp = open_memstream(&state->log_buf, &state->log_cnt);
if (!log_fp)
goto error;
@@ -1020,25 +1006,11 @@ static void *dispatch_thread(void *ctx)
fclose(log_fp);
log_fp = NULL;
}
- /* output log */
- {
- pthread_mutex_lock(&stdout_output_lock);
-
- if (result->log_cnt) {
- result->log_buf[result->log_cnt] = '\0';
- fprintf(stdout, "%s", result->log_buf);
- if (result->log_buf[result->log_cnt - 1] != '\n')
- fprintf(stdout, "\n");
- }
-
- fprintf(stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK"));
-
- pthread_mutex_unlock(&stdout_output_lock);
- }
-
} /* wait for test done */
+
+ pthread_mutex_lock(&stdout_output_lock);
+ dump_test_log(test, state, false);
+ pthread_mutex_unlock(&stdout_output_lock);
} /* while (true) */
error:
if (env.debug)
@@ -1060,38 +1032,50 @@ done:
return NULL;
}
-static void print_all_error_logs(void)
+static void calculate_summary_and_print_errors(struct test_env *env)
{
int i;
+ int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
+
+ for (i = 0; i < prog_test_cnt; i++) {
+ struct test_state *state = &test_states[i];
- if (env.fail_cnt)
- fprintf(stdout, "\nAll error logs:\n");
+ if (!state->tested)
+ continue;
+
+ sub_succ_cnt += state->sub_succ_cnt;
+ skip_cnt += state->skip_cnt;
+
+ if (state->error_cnt)
+ fail_cnt++;
+ else
+ succ_cnt++;
+ }
+
+ if (fail_cnt)
+ printf("\nAll error logs:\n");
/* print error logs again */
for (i = 0; i < prog_test_cnt; i++) {
- struct prog_test_def *test;
- struct test_result *result;
-
- test = &prog_test_defs[i];
- result = &test_results[i];
+ struct prog_test_def *test = &prog_test_defs[i];
+ struct test_state *state = &test_states[i];
- if (!test->tested || !result->error_cnt)
+ if (!state->tested || !state->error_cnt)
continue;
- fprintf(stdout, "\n#%d %s:%s\n",
- test->test_num, test->test_name,
- result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK"));
-
- if (result->log_cnt) {
- result->log_buf[result->log_cnt] = '\0';
- fprintf(stdout, "%s", result->log_buf);
- if (result->log_buf[result->log_cnt - 1] != '\n')
- fprintf(stdout, "\n");
- }
+ dump_test_log(test, state, true);
}
+
+ printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
+ succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
+
+ env->succ_cnt = succ_cnt;
+ env->sub_succ_cnt = sub_succ_cnt;
+ env->fail_cnt = fail_cnt;
+ env->skip_cnt = skip_cnt;
}
-static int server_main(void)
+static void server_main(void)
{
pthread_t *dispatcher_threads;
struct dispatch_data *data;
@@ -1147,60 +1131,18 @@ static int server_main(void)
for (int i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
- struct test_result *result = &test_results[i];
if (!test->should_run || !test->run_serial_test)
continue;
- stdio_hijack();
-
run_one_test(i);
-
- stdio_restore();
- if (env.log_buf) {
- result->log_cnt = env.log_cnt;
- result->log_buf = strdup(env.log_buf);
-
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
- }
- restore_netns();
-
- fprintf(stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
-
- result->error_cnt = test->error_cnt;
- result->skip_cnt = test->skip_cnt;
- result->sub_succ_cnt = test->sub_succ_cnt;
}
/* generate summary */
fflush(stderr);
fflush(stdout);
- for (i = 0; i < prog_test_cnt; i++) {
- struct prog_test_def *current_test;
- struct test_result *result;
-
- current_test = &prog_test_defs[i];
- result = &test_results[i];
-
- if (!current_test->tested)
- continue;
-
- env.succ_cnt += result->error_cnt ? 0 : 1;
- env.skip_cnt += result->skip_cnt;
- if (result->error_cnt)
- env.fail_cnt++;
- env.sub_succ_cnt += result->sub_succ_cnt;
- }
-
- print_all_error_logs();
-
- fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ calculate_summary_and_print_errors(&env);
/* reap all workers */
for (i = 0; i < env.workers; i++) {
@@ -1210,8 +1152,6 @@ static int server_main(void)
if (pid != env.worker_pids[i])
perror("Unable to reap worker");
}
-
- return 0;
}
static int worker_main(int sock)
@@ -1232,35 +1172,29 @@ static int worker_main(int sock)
env.worker_id);
goto out;
case MSG_DO_TEST: {
- int test_to_run;
- struct prog_test_def *test;
+ int test_to_run = msg.do_test.test_num;
+ struct prog_test_def *test = &prog_test_defs[test_to_run];
+ struct test_state *state = &test_states[test_to_run];
struct msg msg_done;
- test_to_run = msg.do_test.test_num;
- test = &prog_test_defs[test_to_run];
-
if (env.debug)
fprintf(stderr, "[%d]: #%d:%s running.\n",
env.worker_id,
test_to_run + 1,
test->test_name);
- stdio_hijack();
-
run_one_test(test_to_run);
- stdio_restore();
-
memset(&msg_done, 0, sizeof(msg_done));
msg_done.type = MSG_TEST_DONE;
msg_done.test_done.test_num = test_to_run;
- msg_done.test_done.error_cnt = test->error_cnt;
- msg_done.test_done.skip_cnt = test->skip_cnt;
- msg_done.test_done.sub_succ_cnt = test->sub_succ_cnt;
+ msg_done.test_done.error_cnt = state->error_cnt;
+ msg_done.test_done.skip_cnt = state->skip_cnt;
+ msg_done.test_done.sub_succ_cnt = state->sub_succ_cnt;
msg_done.test_done.have_log = false;
- if (env.verbosity > VERBOSE_NONE || test->force_log || test->error_cnt) {
- if (env.log_cnt)
+ if (env.verbosity > VERBOSE_NONE || state->force_log || state->error_cnt) {
+ if (state->log_cnt)
msg_done.test_done.have_log = true;
}
if (send_message(sock, &msg_done) < 0) {
@@ -1273,8 +1207,8 @@ static int worker_main(int sock)
char *src;
size_t slen;
- src = env.log_buf;
- slen = env.log_cnt;
+ src = state->log_buf;
+ slen = state->log_cnt;
while (slen) {
struct msg msg_log;
char *dest;
@@ -1294,10 +1228,10 @@ static int worker_main(int sock)
assert(send_message(sock, &msg_log) >= 0);
}
}
- if (env.log_buf) {
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
+ if (state->log_buf) {
+ free(state->log_buf);
+ state->log_buf = NULL;
+ state->log_cnt = 0;
}
if (env.debug)
fprintf(stderr, "[%d]: #%d:%s done.\n",
@@ -1428,7 +1362,6 @@ int main(int argc, char **argv)
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
- struct test_result *result;
if (!test->should_run)
continue;
@@ -1444,34 +1377,7 @@ int main(int argc, char **argv)
continue;
}
- stdio_hijack();
-
run_one_test(i);
-
- stdio_restore();
-
- fprintf(env.stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
-
- result = &test_results[i];
- result->error_cnt = test->error_cnt;
- if (env.log_buf) {
- result->log_buf = strdup(env.log_buf);
- result->log_cnt = env.log_cnt;
-
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
- }
-
- if (test->error_cnt)
- env.fail_cnt++;
- else
- env.succ_cnt++;
-
- skip_account();
- env.sub_succ_cnt += test->sub_succ_cnt;
}
if (env.get_test_cnt) {
@@ -1482,21 +1388,14 @@ int main(int argc, char **argv)
if (env.list_test_names)
goto out;
- print_all_error_logs();
-
- fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ calculate_summary_and_print_errors(&env);
close(env.saved_netns_fd);
out:
if (!env.list_test_names && env.has_testmod)
unload_bpf_testmod();
- free_str_set(&env.test_selector.blacklist);
- free_str_set(&env.test_selector.whitelist);
- free(env.test_selector.num_set);
- free_str_set(&env.subtest_selector.blacklist);
- free_str_set(&env.subtest_selector.whitelist);
- free(env.subtest_selector.num_set);
+
+ free_test_selector(&env.test_selector);
if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
return EXIT_NO_TEST;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 93c1ff705533..d3fee3b98888 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -25,6 +25,7 @@ typedef __u16 __sum16;
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/param.h>
#include <fcntl.h>
#include <pthread.h>
#include <linux/bpf.h>
@@ -37,7 +38,6 @@ typedef __u16 __sum16;
#include <bpf/bpf_endian.h>
#include "trace_helpers.h"
#include "testing_helpers.h"
-#include "flow_dissector_load.h"
enum verbosity {
VERBOSE_NONE,
@@ -46,18 +46,43 @@ enum verbosity {
VERBOSE_SUPER,
};
-struct str_set {
- const char **strs;
+struct test_filter {
+ char *name;
+ char **subtests;
+ int subtest_cnt;
+};
+
+struct test_filter_set {
+ struct test_filter *tests;
int cnt;
};
struct test_selector {
- struct str_set whitelist;
- struct str_set blacklist;
+ struct test_filter_set whitelist;
+ struct test_filter_set blacklist;
bool *num_set;
int num_set_len;
};
+struct test_state {
+ bool tested;
+ bool force_log;
+
+ int error_cnt;
+ int skip_cnt;
+ int subtest_skip_cnt;
+ int sub_succ_cnt;
+
+ char *subtest_name;
+ int subtest_num;
+
+ /* store counts before subtest started */
+ int old_error_cnt;
+
+ size_t log_cnt;
+ char *log_buf;
+};
+
struct test_env {
struct test_selector test_selector;
struct test_selector subtest_selector;
@@ -70,12 +95,11 @@ struct test_env {
bool get_test_cnt;
bool list_test_names;
- struct prog_test_def *test; /* current running tests */
+ struct prog_test_def *test; /* current running test */
+ struct test_state *test_state; /* current running test result */
FILE *stdout;
FILE *stderr;
- char *log_buf;
- size_t log_cnt;
int nr_cpus;
int succ_cnt; /* successful tests */
@@ -120,11 +144,12 @@ struct msg {
extern struct test_env env;
-extern void test__force_log();
-extern bool test__start_subtest(const char *name);
-extern void test__skip(void);
-extern void test__fail(void);
-extern int test__join_cgroup(const char *path);
+void test__force_log(void);
+bool test__start_subtest(const char *name);
+void test__end_subtest(void);
+void test__skip(void);
+void test__fail(void);
+int test__join_cgroup(const char *path);
#define PRINT_FAIL(format...) \
({ \
@@ -267,6 +292,17 @@ extern int test__join_cgroup(const char *path);
___ok; \
})
+#define ASSERT_HAS_SUBSTR(str, substr, name) ({ \
+ static int duration = 0; \
+ const char *___str = str; \
+ const char *___substr = substr; \
+ bool ___ok = strstr(___str, ___substr) != NULL; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: '%s' is not a substring of '%s'\n", \
+ (name), ___substr, ___str); \
+ ___ok; \
+})
+
#define ASSERT_OK(res, name) ({ \
static int duration = 0; \
long long ___res = (res); \
@@ -332,6 +368,8 @@ int trigger_module_test_write(int write_sz);
#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
#elif defined(__s390x__)
#define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
+#elif defined(__aarch64__)
+#define SYS_NANOSLEEP_KPROBE_NAME "__arm64_sys_nanosleep"
#else
#define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
#endif
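
The ASSERT_HAS_SUBSTR() macro added above follows the existing ASSERT_*() pattern in test_progs.h: it wraps a strstr() check in CHECK() and evaluates to the boolean outcome, so it can gate further checks. A minimal usage sketch; the helper name, log string, and assertion name below are illustrative, not taken from this series:

#include <stdbool.h>
#include "test_progs.h"

/* hypothetical helper, not part of the patch */
static bool log_mentions_load_failure(const char *log_buf)
{
	/* true when "load failed" occurs anywhere in log_buf; a miss is
	 * reported through the usual CHECK() machinery under "verifier_log"
	 */
	return ASSERT_HAS_SUBSTR(log_buf, "load failed", "verifier_log");
}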
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index 4a64306728ab..3256de30f563 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -15,7 +15,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
#define CGROUP_PATH "/skb_cgroup_test"
@@ -160,6 +159,9 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
cgfd = cgroup_setup_and_join(CGROUP_PATH);
if (cgfd < 0)
goto err;
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fe10f8134278..810c3740b2cc 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -14,7 +14,6 @@
#include "cgroup_helpers.h"
#include <bpf/bpf_endian.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#define CG_PATH "/foo"
@@ -493,7 +492,7 @@ static int run_test_case(int cgfd, const struct sock_test *test)
goto err;
}
- if (attach_sock_prog(cgfd, progfd, test->attach_type) == -1) {
+ if (attach_sock_prog(cgfd, progfd, test->attach_type) < 0) {
if (test->result == ATTACH_REJECT)
goto out;
else
@@ -541,6 +540,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index f3d5d7ac6505..458564fcfc82 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -19,7 +19,6 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#ifndef ENOTSUPP
@@ -1418,6 +1417,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index dfb4f5c0fcb9..0fbaccdc8861 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -18,7 +18,6 @@
#include <sched.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sendfile.h>
@@ -37,7 +36,6 @@
#include <bpf/libbpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
int running;
@@ -2017,6 +2015,9 @@ int main(int argc, char **argv)
cg_created = 1;
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (test == SELFTESTS) {
err = test_selftest(cg_fd, &options);
goto out;
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index 4f6cf833b522..57620e7c9048 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -14,7 +14,6 @@
#include <bpf/libbpf.h>
#include <bpf/bpf_endian.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
@@ -1561,7 +1560,7 @@ static int run_test_case(int cgfd, struct sysctl_test *test)
goto err;
}
- if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) == -1) {
+ if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) < 0) {
if (test->result == ATTACH_REJECT)
goto out;
else
@@ -1618,6 +1617,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
index 0851c42ee31c..5546b05a0486 100644
--- a/tools/testing/selftests/bpf/test_tag.c
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -20,7 +20,6 @@
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
-#include "bpf_rlimit.h"
#include "testing_helpers.h"
static struct bpf_insn prog[BPF_MAXINSNS];
@@ -189,6 +188,9 @@ int main(void)
uint32_t tests = 0;
int i, fd_map;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
sizeof(int), 1, &opts);
assert(fd_map > 0);
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
index e7775d3bbe08..5c8ef062f760 100644
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
@@ -15,7 +15,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
static int start_server(const struct sockaddr *addr, socklen_t len, bool dual)
@@ -235,6 +234,9 @@ int main(int argc, char **argv)
exit(1);
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
results = get_map_fd_by_prog_id(atoi(argv[1]), &xdp);
if (results < 0) {
log_err("Can't get map");
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4c5114765b23..8284db8b0f13 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -19,7 +19,6 @@
#include <linux/perf_event.h>
#include <linux/err.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index a2cd236c32eb..372579c9f45e 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -53,7 +53,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 22
+#define MAX_NR_MAPS 23
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -101,6 +101,7 @@ struct bpf_test {
int fixup_map_reuseport_array[MAX_FIXUPS];
int fixup_map_ringbuf[MAX_FIXUPS];
int fixup_map_timer[MAX_FIXUPS];
+ int fixup_map_kptr[MAX_FIXUPS];
struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
* Can be a tab-separated sequence of expected strings. An empty string
@@ -621,8 +622,15 @@ static int create_cgroup_storage(bool percpu)
* struct timer {
* struct bpf_timer t;
* };
+ * struct btf_ptr {
+ * struct prog_test_ref_kfunc __kptr *ptr;
+ * struct prog_test_ref_kfunc __kptr_ref *ptr;
+ * struct prog_test_member __kptr_ref *ptr;
+ * }
*/
-static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
+ "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref"
+ "\0prog_test_member";
static __u32 btf_raw_types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -638,6 +646,22 @@ static __u32 btf_raw_types[] = {
/* struct timer */ /* [5] */
BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
+ /* struct prog_test_ref_kfunc */ /* [6] */
+ BTF_STRUCT_ENC(51, 0, 0),
+ BTF_STRUCT_ENC(89, 0, 0), /* [7] */
+ /* type tag "kptr" */
+ BTF_TYPE_TAG_ENC(75, 6), /* [8] */
+ /* type tag "kptr_ref" */
+ BTF_TYPE_TAG_ENC(80, 6), /* [9] */
+ BTF_TYPE_TAG_ENC(80, 7), /* [10] */
+ BTF_PTR_ENC(8), /* [11] */
+ BTF_PTR_ENC(9), /* [12] */
+ BTF_PTR_ENC(10), /* [13] */
+ /* struct btf_ptr */ /* [14] */
+ BTF_STRUCT_ENC(43, 3, 24),
+ BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
+ BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
+ BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
};
static int load_btf(void)
@@ -727,6 +751,25 @@ static int create_map_timer(void)
return fd;
}
+static int create_map_kptr(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 14,
+ );
+ int fd, btf_fd;
+
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+
+ opts.btf_fd = btf_fd;
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
+ if (fd < 0)
+ printf("Failed to create map with btf_id pointer\n");
+ return fd;
+}
+
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -754,6 +797,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
int *fixup_map_ringbuf = test->fixup_map_ringbuf;
int *fixup_map_timer = test->fixup_map_timer;
+ int *fixup_map_kptr = test->fixup_map_kptr;
struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;
if (test->fill_helper) {
@@ -947,6 +991,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_timer++;
} while (*fixup_map_timer);
}
+ if (*fixup_map_kptr) {
+ map_fds[22] = create_map_kptr();
+ do {
+ prog[*fixup_map_kptr].imm = map_fds[22];
+ fixup_map_kptr++;
+ } while (*fixup_map_kptr);
+ }
/* Patch in kfunc BTF IDs */
if (fixup_kfunc_btf_id->kfunc) {
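
create_map_kptr() above hand-assembles BTF for a 24-byte array-map value holding three kptr fields (see the string section and btf_raw_types additions). For orientation, a hedged BPF C sketch of roughly the same value layout; the struct and field names are invented here, the type-tag defines simply mirror the "kptr"/"kptr_ref" tags encoded in the raw BTF, and <vmlinux.h> plus <bpf/bpf_helpers.h> are assumed:

/* assumes <vmlinux.h> and <bpf/bpf_helpers.h>; prog_test_ref_kfunc and
 * prog_test_member are the kernel-side test types referenced by the BTF
 */
#define __kptr		__attribute__((btf_type_tag("kptr")))
#define __kptr_ref	__attribute__((btf_type_tag("kptr_ref")))

struct map_value {
	struct prog_test_ref_kfunc __kptr *unref_ptr;	/* offset 0 */
	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;	/* offset 8 */
	struct prog_test_member __kptr_ref *memb_ptr;	/* offset 16 */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);	/* 24 bytes, matching bpf_map_create(..., 4, 24, 1, ...) */
} array_map SEC(".maps");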
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
index 8d6918c3b4a2..70feda97cee5 100644
--- a/tools/testing/selftests/bpf/test_verifier_log.c
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -11,8 +11,6 @@
#include <bpf/bpf.h>
-#include "bpf_rlimit.h"
-
#define LOG_SIZE (1 << 20)
#define err(str...) printf("ERROR: " str)
@@ -141,6 +139,9 @@ int main(int argc, char **argv)
memset(log, 1, LOG_SIZE);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
/* Test incorrect attr */
printf("Test log_level 0...\n");
test_log_bad(log, LOG_SIZE, 0);
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 795b6798ccee..9695318e8132 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -6,6 +6,7 @@
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "test_progs.h"
#include "testing_helpers.h"
int parse_num_list(const char *s, bool **num_set, int *num_set_len)
@@ -60,7 +61,7 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
set[i] = true;
}
- if (!set)
+ if (!set || parsing_end)
return -EINVAL;
*num_set = set;
@@ -69,6 +70,94 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
return 0;
}
+int parse_test_list(const char *s,
+ struct test_filter_set *set,
+ bool is_glob_pattern)
+{
+ char *input, *state = NULL, *next;
+ struct test_filter *tmp, *tests = NULL;
+ int i, j, cnt = 0;
+
+ input = strdup(s);
+ if (!input)
+ return -ENOMEM;
+
+ while ((next = strtok_r(state ? NULL : input, ",", &state))) {
+ char *subtest_str = strchr(next, '/');
+ char *pattern = NULL;
+ int glob_chars = 0;
+
+ tmp = realloc(tests, sizeof(*tests) * (cnt + 1));
+ if (!tmp)
+ goto err;
+ tests = tmp;
+
+ tests[cnt].subtest_cnt = 0;
+ tests[cnt].subtests = NULL;
+
+ if (is_glob_pattern) {
+ pattern = "%s";
+ } else {
+ pattern = "*%s*";
+ glob_chars = 2;
+ }
+
+ if (subtest_str) {
+ char **tmp_subtests = NULL;
+ int subtest_cnt = tests[cnt].subtest_cnt;
+
+ *subtest_str = '\0';
+ subtest_str += 1;
+ tmp_subtests = realloc(tests[cnt].subtests,
+ sizeof(*tmp_subtests) *
+ (subtest_cnt + 1));
+ if (!tmp_subtests)
+ goto err;
+ tests[cnt].subtests = tmp_subtests;
+
+ tests[cnt].subtests[subtest_cnt] =
+ malloc(strlen(subtest_str) + glob_chars + 1);
+ if (!tests[cnt].subtests[subtest_cnt])
+ goto err;
+ sprintf(tests[cnt].subtests[subtest_cnt],
+ pattern,
+ subtest_str);
+
+ tests[cnt].subtest_cnt++;
+ }
+
+ tests[cnt].name = malloc(strlen(next) + glob_chars + 1);
+ if (!tests[cnt].name)
+ goto err;
+ sprintf(tests[cnt].name, pattern, next);
+
+ cnt++;
+ }
+
+ tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt));
+ if (!tmp)
+ goto err;
+
+ memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt);
+ set->tests = tmp;
+ set->cnt += cnt;
+
+ free(tests);
+ free(input);
+ return 0;
+
+err:
+ for (i = 0; i < cnt; i++) {
+ for (j = 0; j < tests[i].subtest_cnt; j++)
+ free(tests[i].subtests[j]);
+
+ free(tests[i].name);
+ }
+ free(tests);
+ free(input);
+ return -ENOMEM;
+}
+
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
{
__u32 info_len = sizeof(*info);
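
parse_test_list() above accepts a comma-separated list of "test[/subtest]" entries. With is_glob_pattern set the patterns are stored verbatim; otherwise each name is wrapped in "*...*" so plain substrings keep matching, preserving the existing substring behaviour of the name filters. A minimal driver sketch, with an illustrative input string and a hypothetical caller:

#include <stdio.h>
#include "test_progs.h"
#include "testing_helpers.h"

/* hypothetical caller, not part of the patch */
static int demo_parse(void)
{
	struct test_filter_set set = {};

	/* "attach_probe" is stored as "*attach_probe*"; "usdt/basic" becomes
	 * test "*usdt*" with one subtest pattern "*basic*"
	 */
	if (parse_test_list("attach_probe,usdt/basic", &set, false))
		return -1;

	printf("parsed %d test filter(s)\n", set.cnt);
	return 0;
}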
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index f46ebc476ee8..6ec00bf79cb5 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -12,3 +12,11 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
__u32 kern_version, char *log_buf,
size_t log_buf_sz);
+
+/*
+ * The function below is exported so it can be tested from the prog_tests binary.
+ */
+struct test_filter_set;
+int parse_test_list(const char *s,
+ struct test_filter_set *test_set,
+ bool is_glob_pattern);
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 3d6217e3aff7..9c4be2cdb21a 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -25,15 +25,12 @@ static int ksym_cmp(const void *p1, const void *p2)
int load_kallsyms(void)
{
- FILE *f = fopen("/proc/kallsyms", "r");
+ FILE *f;
char func[256], buf[256];
char symbol;
void *addr;
int i = 0;
- if (!f)
- return -ENOENT;
-
/*
* This is called/used from multiple places,
* load symbols just once.
@@ -41,6 +38,10 @@ int load_kallsyms(void)
if (sym_cnt)
return 0;
+ f = fopen("/proc/kallsyms", "r");
+ if (!f)
+ return -ENOENT;
+
while (fgets(buf, sizeof(buf), f)) {
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
diff --git a/tools/testing/selftests/bpf/urandom_read.c b/tools/testing/selftests/bpf/urandom_read.c
index db781052758d..e92644d0fa75 100644
--- a/tools/testing/selftests/bpf/urandom_read.c
+++ b/tools/testing/selftests/bpf/urandom_read.c
@@ -1,32 +1,85 @@
+#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
+#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
+#include <signal.h>
+
+#define _SDT_HAS_SEMAPHORES 1
+#include "sdt.h"
+
+#define SEC(name) __attribute__((section(name), used))
#define BUF_SIZE 256
+/* defined in urandom_read_aux.c */
+void urand_read_without_sema(int iter_num, int iter_cnt, int read_sz);
+/* these are coming from urandom_read_lib{1,2}.c */
+void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz);
+void urandlib_read_without_sema(int iter_num, int iter_cnt, int read_sz);
+
+unsigned short urand_read_with_sema_semaphore SEC(".probes");
+
static __attribute__((noinline))
void urandom_read(int fd, int count)
{
- char buf[BUF_SIZE];
- int i;
+ char buf[BUF_SIZE];
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ read(fd, buf, BUF_SIZE);
+
+ /* trigger USDTs defined in executable itself */
+ urand_read_without_sema(i, count, BUF_SIZE);
+ STAP_PROBE3(urand, read_with_sema, i, count, BUF_SIZE);
- for (i = 0; i < count; ++i)
- read(fd, buf, BUF_SIZE);
+ /* trigger USDTs defined in shared lib */
+ urandlib_read_without_sema(i, count, BUF_SIZE);
+ urandlib_read_with_sema(i, count, BUF_SIZE);
+ }
+}
+
+static volatile bool parent_ready;
+
+static void handle_sigpipe(int sig)
+{
+ parent_ready = true;
}
int main(int argc, char *argv[])
{
int fd = open("/dev/urandom", O_RDONLY);
int count = 4;
+ bool report_pid = false;
if (fd < 0)
return 1;
- if (argc == 2)
+ if (argc >= 2)
count = atoi(argv[1]);
+ if (argc >= 3) {
+ report_pid = true;
+ /* install SIGPIPE handler to catch when parent closes their
+ * end of the pipe (on the other side of our stdout)
+ */
+ signal(SIGPIPE, handle_sigpipe);
+ }
+
+ /* report PID and wait for parent process to send us "signal" by
+ * closing stdout
+ */
+ if (report_pid) {
+ while (!parent_ready) {
+ fprintf(stdout, "%d\n", getpid());
+ fflush(stdout);
+ }
+ /* at this point stdout is closed, parent process knows our
+ * PID and is ready to trace us
+ */
+ }
urandom_read(fd, count);
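
urandom_read now fires USDT probes, both semaphore-gated (note _SDT_HAS_SEMAPHORES and the .probes section) and semaphore-less, from the executable itself and from two shared libraries, and can report its PID and wait on SIGPIPE so a tracer can attach first. A hedged sketch of attaching to the "urand:read_with_sema" probe, assuming libbpf's bpf_program__attach_usdt() from the same development cycle; the program handle passed in is hypothetical:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* prog would be a SEC("usdt") program from some skeleton; illustrative only */
static int attach_urand_usdt(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_usdt(prog, -1 /* any pid */,
					"./urandom_read",
					"urand", "read_with_sema",
					NULL /* opts */);
	if (!link) {
		fprintf(stderr, "USDT attach failed: %d\n", -errno);
		return -errno;
	}
	return 0;
}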
diff --git a/tools/testing/selftests/bpf/urandom_read_aux.c b/tools/testing/selftests/bpf/urandom_read_aux.c
new file mode 100644
index 000000000000..6132edcfea74
--- /dev/null
+++ b/tools/testing/selftests/bpf/urandom_read_aux.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include "sdt.h"
+
+void urand_read_without_sema(int iter_num, int iter_cnt, int read_sz)
+{
+ /* semaphore-less USDT */
+ STAP_PROBE3(urand, read_without_sema, iter_num, iter_cnt, read_sz);
+}
diff --git a/tools/testing/selftests/bpf/urandom_read_lib1.c b/tools/testing/selftests/bpf/urandom_read_lib1.c
new file mode 100644
index 000000000000..86186e24b740
--- /dev/null
+++ b/tools/testing/selftests/bpf/urandom_read_lib1.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#define _SDT_HAS_SEMAPHORES 1
+#include "sdt.h"
+
+#define SEC(name) __attribute__((section(name), used))
+
+unsigned short urandlib_read_with_sema_semaphore SEC(".probes");
+
+void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz)
+{
+ STAP_PROBE3(urandlib, read_with_sema, iter_num, iter_cnt, read_sz);
+}
diff --git a/tools/testing/selftests/bpf/urandom_read_lib2.c b/tools/testing/selftests/bpf/urandom_read_lib2.c
new file mode 100644
index 000000000000..9d401ad9838f
--- /dev/null
+++ b/tools/testing/selftests/bpf/urandom_read_lib2.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include "sdt.h"
+
+void urandlib_read_without_sema(int iter_num, int iter_cnt, int read_sz)
+{
+ STAP_PROBE3(urandlib, read_without_sema, iter_num, iter_cnt, read_sz);
+}
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 2e03decb11b6..743ed34c1238 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -139,6 +139,26 @@
},
},
{
+ "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_memb_acquire", 1 },
+ { "bpf_kfunc_call_memb1_release", 5 },
+ },
+},
+{
"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c
new file mode 100644
index 000000000000..9113834640e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_kptr.c
@@ -0,0 +1,469 @@
+/* Common tests */
+{
+ "map_kptr: BPF_ST imm != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "BPF_ST imm must be 0 when storing to kptr at off=0",
+},
+{
+ "map_kptr: size != bpf_size_to_bytes(BPF_DW)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access size must be BPF_DW",
+},
+{
+ "map_kptr: map_value non-const var_off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access cannot have variable offset",
+},
+{
+ "map_kptr: bpf_kptr_xchg non-const var_off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 doesn't have constant offset. kptr has to be at the constant offset",
+},
+{
+ "map_kptr: unaligned boundary load/store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access misaligned expected=0 off=7",
+},
+{
+ "map_kptr: reject var_off != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "variable untrusted_ptr_ access var_off=(0x0; 0x7) disallowed",
+},
+/* Tests for unreferenced PTR_TO_BTF_ID */
+{
+ "map_kptr: unref: reject btf_struct_ids_match == false",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc expected=ptr_prog_test",
+},
+{
+ "map_kptr: unref: loaded pointer marked as untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'untrusted_ptr_or_null_'",
+},
+{
+ "map_kptr: unref: correct in kernel type size",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 24),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "access beyond struct prog_test_ref_kfunc at off 24 size 8",
+},
+{
+ "map_kptr: unref: inherit PTR_UNTRUSTED on struct walk",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 type=untrusted_ptr_ expected=percpu_ptr_",
+},
+{
+ "map_kptr: unref: no reference state created",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = ACCEPT,
+},
+{
+ "map_kptr: unref: bpf_kptr_xchg rejected",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "off=0 kptr isn't referenced kptr",
+},
+{
+ "map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "arg#0 no referenced kptr at map value offset=0",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_kptr_get", 13 },
+ }
+},
+/* Tests for referenced PTR_TO_BTF_ID */
+{
+ "map_kptr: ref: loaded pointer marked as untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_",
+},
+{
+ "map_kptr: ref: reject off != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member",
+},
+{
+ "map_kptr: ref: reference state created and released on xchg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "Unreleased reference id=5 alloc_insn=20",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 15 },
+ }
+},
+{
+ "map_kptr: ref: reject STX",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "store to referenced kptr disallowed",
+},
+{
+ "map_kptr: ref: reject ST",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "store to referenced kptr disallowed",
+},
+{
+ "map_kptr: reject helper access to kptr",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr cannot be accessed indirectly by helper",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index fbd682520e47..57a83d763ec1 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -796,7 +796,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 86b24cad27a7..d11d0b28be41 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -417,7 +417,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"bpf_sk_release(bpf_sk_fullsock(skb->sk))",
@@ -436,7 +436,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"bpf_sk_release(bpf_tcp_sock(skb->sk))",
@@ -455,7 +455,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"sk_storage_get(map, skb->sk, NULL, 0): value == NULL",
diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c
index aaedbf4955c3..c03b3a75991f 100644
--- a/tools/testing/selftests/bpf/xdp_redirect_multi.c
+++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c
@@ -10,7 +10,6 @@
#include <net/if.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index c567856fd1bc..5b6f977870f8 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -12,7 +12,6 @@
#include <string.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
@@ -89,7 +88,6 @@ int main(int argc, char **argv)
{
__u32 mode_flags = XDP_FLAGS_DRV_MODE | XDP_FLAGS_SKB_MODE;
struct addrinfo *a, hints = { .ai_family = AF_INET };
- struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
__u16 count = XDPING_DEFAULT_COUNT;
struct pinginfo pinginfo = { 0 };
const char *optstr = "c:I:NsS";
@@ -167,10 +165,8 @@ int main(int argc, char **argv)
freeaddrinfo(a);
}
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
- perror("setrlimit(RLIMIT_MEMLOCK)");
- return 1;
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
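
Across these utilities the manual RLIMIT_MEMLOCK bump (bpf_rlimit.h or an explicit setrlimit() as removed above) is replaced by opting into libbpf 1.0 behaviour. A minimal sketch of the pattern; the wrapper name is illustrative, and the comment reflects libbpf's LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK behaviour, which LIBBPF_STRICT_ALL includes:

#include <bpf/libbpf.h>

/* illustrative wrapper; the tools call libbpf_set_strict_mode() directly */
static int setup_libbpf(void)
{
	/* with LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK, libbpf raises
	 * RLIMIT_MEMLOCK itself on kernels that still charge BPF memory
	 * against the rlimit, so callers no longer need setrlimit();
	 * call this once, before any other libbpf usage
	 */
	return libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
}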
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index 5f8296d29e77..cfcb031323c5 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -90,7 +90,6 @@
#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <time.h>
@@ -1448,14 +1447,13 @@ static void ifobject_delete(struct ifobject *ifobj)
int main(int argc, char **argv)
{
- struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY };
struct pkt_stream *pkt_stream_default;
struct ifobject *ifobj_tx, *ifobj_rx;
struct test_spec test;
u32 i, j;
- if (setrlimit(RLIMIT_MEMLOCK, &_rlim))
- exit_with_error(errno);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
ifobj_tx = ifobject_create();
if (!ifobj_tx)
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
new file mode 120000
index 000000000000..f5eb940c4c7c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_locked_port.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
new file mode 120000
index 000000000000..76492da525f7
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_mdb.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
new file mode 120000
index 000000000000..81a7e0df0474
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_mld.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
new file mode 120000
index 000000000000..9831ed74376a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_vlan_aware.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
new file mode 120000
index 000000000000..7f3c3f0bf719
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_vlan_mcast.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
new file mode 120000
index 000000000000..bf1a57e6bde1
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_vlan_unaware.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/forwarding.config b/tools/testing/selftests/drivers/net/dsa/forwarding.config
new file mode 100644
index 000000000000..7adc1396fae0
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/forwarding.config
@@ -0,0 +1,2 @@
+NETIF_CREATE=no
+STABLE_MAC_ADDRS=yes
diff --git a/tools/testing/selftests/drivers/net/dsa/lib.sh b/tools/testing/selftests/drivers/net/dsa/lib.sh
new file mode 120000
index 000000000000..39c96828c5ef
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/lib.sh
@@ -0,0 +1 @@
+../../../net/forwarding/lib.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/local_termination.sh b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
new file mode 120000
index 000000000000..c08166f84501
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
@@ -0,0 +1 @@
+../../../net/forwarding/local_termination.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
new file mode 120000
index 000000000000..b9757466bc97
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
@@ -0,0 +1 @@
+../../../net/forwarding/no_forwarding.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh
new file mode 100755
index 000000000000..53a65f416770
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh
@@ -0,0 +1,341 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# In addition to the common variables, the user might use:
+# LC_SLOT - If not set, all probed line cards are going to be tested,
+# with the exception of the "activation_16x100G_test".
+# If set, only the selected line card is going to be used
+# for tests, including "activation_16x100G_test".
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ unprovision_test
+ provision_test
+ activation_16x100G_test
+"
+
+NUM_NETIFS=0
+
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+until_lc_state_is()
+{
+ local state=$1; shift
+ local current=$("$@")
+
+ echo "$current"
+ [ "$current" == "$state" ]
+}
+
+until_lc_state_is_not()
+{
+ ! until_lc_state_is "$@"
+}
+
+lc_state_get()
+{
+ local lc=$1
+
+ devlink lc show $DEVLINK_DEV lc $lc -j | jq -e -r ".[][][].state"
+}
+
+lc_wait_until_state_changes()
+{
+ local lc=$1
+ local state=$2
+ local timeout=$3 # ms
+
+ busywait "$timeout" until_lc_state_is_not "$state" lc_state_get "$lc"
+}
+
+lc_wait_until_state_becomes()
+{
+ local lc=$1
+ local state=$2
+ local timeout=$3 # ms
+
+ busywait "$timeout" until_lc_state_is "$state" lc_state_get "$lc"
+}
+
+until_lc_port_count_is()
+{
+ local port_count=$1; shift
+ local current=$("$@")
+
+ echo "$current"
+ [ $current == $port_count ]
+}
+
+lc_port_count_get()
+{
+ local lc=$1
+
+ devlink port -j | jq -e -r ".[][] | select(.lc==$lc) | .port" | wc -l
+}
+
+lc_wait_until_port_count_is()
+{
+ local lc=$1
+ local port_count=$2
+ local timeout=$3 # ms
+
+ busywait "$timeout" until_lc_port_count_is "$port_count" lc_port_count_get "$lc"
+}
+
+PROV_UNPROV_TIMEOUT=8000 # ms
+POST_PROV_ACT_TIMEOUT=2000 # ms
+PROV_PORTS_INSTANTIATION_TIMEOUT=15000 # ms
+
+unprovision_one()
+{
+ local lc=$1
+ local state
+
+ state=$(lc_state_get $lc)
+ check_err $? "Failed to get state of linecard $lc"
+ if [[ "$state" == "unprovisioned" ]]; then
+ return
+ fi
+
+ log_info "Unprovisioning linecard $lc"
+
+ devlink lc set $DEVLINK_DEV lc $lc notype
+ check_err $? "Failed to trigger linecard $lc unprovisioning"
+
+ state=$(lc_wait_until_state_changes $lc "unprovisioning" \
+ $PROV_UNPROV_TIMEOUT)
+ check_err $? "Failed to unprovision linecard $lc (timeout)"
+
+ [ "$state" == "unprovisioned" ]
+ check_err $? "Failed to unprovision linecard $lc (state=$state)"
+}
+
+provision_one()
+{
+ local lc=$1
+ local type=$2
+ local state
+
+ log_info "Provisioning linecard $lc"
+
+ devlink lc set $DEVLINK_DEV lc $lc type $type
+ check_err $? "Failed to trigger linecard $lc provisioning"
+
+ state=$(lc_wait_until_state_changes $lc "provisioning" \
+ $PROV_UNPROV_TIMEOUT)
+ check_err $? "Failed to provision linecard $lc (timeout)"
+
+ [ "$state" == "provisioned" ] || [ "$state" == "active" ]
+ check_err $? "Failed to provision linecard $lc (state=$state)"
+
+ provisioned_type=$(devlink lc show $DEVLINK_DEV lc $lc -j | jq -e -r ".[][][].type")
+ [ "$provisioned_type" == "$type" ]
+ check_err $? "Wrong provision type returned for linecard $lc (got \"$provisioned_type\", expected \"$type\")"
+
+ # Wait for possible activation to make sure the state
+ # won't change after return from this function.
+ state=$(lc_wait_until_state_becomes $lc "active" \
+ $POST_PROV_ACT_TIMEOUT)
+}
+
+unprovision_test()
+{
+ RET=0
+ local lc
+
+ lc=$LC_SLOT
+ unprovision_one $lc
+ log_test "Unprovision"
+}
+
+LC_16X100G_TYPE="16x100G"
+LC_16X100G_PORT_COUNT=16
+LC_16X100G_DEVICE_COUNT=4
+
+supported_types_check()
+{
+ local lc=$1
+ local supported_types_count
+ local type_index
+ local lc_16x100_found=false
+
+ supported_types_count=$(devlink lc show $DEVLINK_DEV lc $lc -j | \
+ jq -e -r ".[][][].supported_types | length")
+ [ $supported_types_count != 0 ]
+ check_err $? "No supported types found for linecard $lc"
+ for (( type_index=0; type_index<$supported_types_count; type_index++ ))
+ do
+ type=$(devlink lc show $DEVLINK_DEV lc $lc -j | \
+ jq -e -r ".[][][].supported_types[$type_index]")
+ if [[ "$type" == "$LC_16X100G_TYPE" ]]; then
+ lc_16x100_found=true
+ break
+ fi
+ done
+ [ $lc_16x100_found = true ]
+	check_err $? "16x100G not found among supported types of linecard $lc"
+}
+
+lc_info_check()
+{
+ local lc=$1
+ local fixed_hw_revision
+ local running_ini_version
+
+ fixed_hw_revision=$(devlink lc -v info $DEVLINK_DEV lc $lc -j | \
+ jq -e -r '.[][][].versions.fixed."hw.revision"')
+ check_err $? "Failed to get linecard $lc fixed.hw.revision"
+ log_info "Linecard $lc fixed.hw.revision: \"$fixed_hw_revision\""
+ running_ini_version=$(devlink lc -v info $DEVLINK_DEV lc $lc -j | \
+ jq -e -r '.[][][].versions.running."ini.version"')
+ check_err $? "Failed to get linecard $lc running.ini.version"
+ log_info "Linecard $lc running.ini.version: \"$running_ini_version\""
+}
+
+lc_devices_check()
+{
+ local lc=$1
+ local expected_device_count=$2
+ local device_count
+ local device
+
+ device_count=$(devlink lc show $DEVLINK_DEV lc $lc -j | \
+ jq -e -r ".[][][].devices |length")
+ check_err $? "Failed to get linecard $lc device count"
+ [ $device_count != 0 ]
+ check_err $? "No device found on linecard $lc"
+ [ $device_count == $expected_device_count ]
+	check_err $? "Unexpected device count on linecard $lc (got $device_count, expected $expected_device_count)"
+ for (( device=0; device<device_count; device++ ))
+ do
+ log_info "Linecard $lc device $device"
+ done
+}
+
+ports_check()
+{
+ local lc=$1
+ local expected_port_count=$2
+ local port_count
+
+ port_count=$(lc_wait_until_port_count_is $lc $expected_port_count \
+ $PROV_PORTS_INSTANTIATION_TIMEOUT)
+ [ $port_count != 0 ]
+ check_err $? "No port associated with linecard $lc"
+ [ $port_count == $expected_port_count ]
+	check_err $? "Unexpected port count on linecard $lc (got $port_count, expected $expected_port_count)"
+}
+
+provision_test()
+{
+ RET=0
+ local lc
+ local type
+ local state
+
+ lc=$LC_SLOT
+ supported_types_check $lc
+ state=$(lc_state_get $lc)
+ check_err $? "Failed to get state of linecard $lc"
+ if [[ "$state" != "unprovisioned" ]]; then
+ unprovision_one $lc
+ fi
+ provision_one $lc $LC_16X100G_TYPE
+ lc_devices_check $lc $LC_16X100G_DEVICE_COUNT
+ lc_info_check $lc
+ ports_check $lc $LC_16X100G_PORT_COUNT
+ log_test "Provision"
+}
+
+ACTIVATION_TIMEOUT=20000 # ms
+
+interface_check()
+{
+ ip link set $h1 up
+ ip link set $h2 up
+ ifaces_upped=true
+ setup_wait
+}
+
+lc_devices_info_check()
+{
+ local lc=$1
+ local expected_device_count=$2
+ local device_count
+ local device
+ local running_device_fw
+
+ device_count=$(devlink lc info $DEVLINK_DEV lc $lc -j | \
+ jq -e -r ".[][][].devices |length")
+ check_err $? "Failed to get linecard $lc device count"
+ for (( device=0; device<device_count; device++ ))
+ do
+ running_device_fw=$(devlink lc -v info $DEVLINK_DEV lc $lc -j | \
+ jq -e -r ".[][][].devices[$device].versions.running.fw")
+ check_err $? "Failed to get linecard $lc device $device running fw version"
+ log_info "Linecard $lc device $device running.fw: \"$running_device_fw\""
+ done
+}
+
+activation_16x100G_test()
+{
+ RET=0
+ local lc
+ local type
+ local state
+
+ lc=$LC_SLOT
+ type=$LC_16X100G_TYPE
+
+ unprovision_one $lc
+ provision_one $lc $type
+ state=$(lc_wait_until_state_becomes $lc "active" \
+ $ACTIVATION_TIMEOUT)
+ check_err $? "Failed to get linecard $lc activated (timeout)"
+
+ lc_devices_info_check $lc $LC_16X100G_DEVICE_COUNT
+
+ interface_check
+
+ log_test "Activation 16x100G"
+}
+
+setup_prepare()
+{
+ local lc_num=$(devlink lc show -j | jq -e -r ".[][\"$DEVLINK_DEV\"] |length")
+ if [[ $? -ne 0 ]] || [[ $lc_num -eq 0 ]]; then
+ echo "SKIP: No linecard support found"
+ exit $ksft_skip
+ fi
+
+ if [ -z "$LC_SLOT" ]; then
+ echo "SKIP: \"LC_SLOT\" variable not provided"
+ exit $ksft_skip
+ fi
+
+	# The interfaces are not present when the script starts,
+	# which is why NUM_NETIFS is defined only here, so that
+	# dummy implicit veth pairs are not created.
+ NUM_NETIFS=2
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+ ifaces_upped=false
+}
+
+cleanup()
+{
+ if [ "$ifaces_upped" = true ] ; then
+ ip link set $h1 down
+ ip link set $h2 down
+ fi
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_burst.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_burst.sh
new file mode 100755
index 000000000000..82a47b903f92
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_burst.sh
@@ -0,0 +1,480 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test sends 1Gbps of traffic through the switch, into which it then
+# injects a burst of traffic and tests that there are no drops.
+#
+# The 1Gbps stream is created by sending a >1Gbps stream from H1. This stream
+# ingresses through $swp1, and is forwarded through a small temporary pool to a
+# 1Gbps $swp3.
+#
+# Thus a 1Gbps stream enters $swp4, and is forwarded through a large pool to
+# $swp2, and eventually to H2. Since $swp2 is a 1Gbps port as well, no backlog
+# is generated.
+#
+# At this point, a burst of traffic is forwarded from H3. This enters $swp5, is
+# forwarded to $swp2, which is fully subscribed by the 1Gbps stream. The
+# expectation is that the burst is wholly absorbed by the large pool and no
+# drops are caused. After the burst, there should be a backlog that is hard to
+# get rid of, because $swp2 is fully subscribed. But because each individual
+# packet is scheduled soon after getting enqueued, SLL and HLL do not impact the
+# test.
+#
+# +-----------------------+ +-----------------------+
+# | H1 | | H3 |
+# | + $h1.111 | | $h3.111 + |
+# | | 192.0.2.33/28 | | 192.0.2.35/28 | |
+# | | | | | |
+# | + $h1 | | $h3 + |
+# +---|-------------------+ +--------------------+ +------------------|----+
+# | | | |
+# +---|----------------------|--------------------|----------------------|----+
+# | + $swp1 $swp3 + + $swp4 $swp5 | |
+# | | iPOOL1 iPOOL0 | | iPOOL2 iPOOL2 | |
+# | | ePOOL4 ePOOL5 | | ePOOL4 ePOOL4 | |
+# | | 1Gbps | | 1Gbps | |
+# | +-|----------------------|-+ +-|----------------------|-+ |
+# | | + $swp1.111 $swp3.111 + | | + $swp4.111 $swp5.111 + | |
+# | | | | | |
+# | | BR1 | | BR2 | |
+# | | | | | |
+# | | | | + $swp2.111 | |
+# | +--------------------------+ +---------|----------------+ |
+# | | |
+# | iPOOL0: 500KB dynamic | |
+# | iPOOL1: 500KB dynamic | |
+# | iPOOL2: 10MB dynamic + $swp2 |
+# | ePOOL4: 500KB dynamic | iPOOL0 |
+# | ePOOL5: 500KB dynamic | ePOOL6 |
+# | ePOOL6: 10MB dynamic | 1Gbps |
+# +-------------------------------------------------------|-------------------+
+# |
+# +---|-------------------+
+# | + $h2 H2 |
+# | | 1Gbps |
+# | | |
+# | + $h2.111 |
+# | 192.0.2.34/28 |
+# +-----------------------+
+#
+# iPOOL0+ePOOL4 are helper pools for control traffic etc.
+# iPOOL1+ePOOL5 are helper pools for modeling the 1Gbps stream
+# iPOOL2+ePOOL6 are pools for soaking the burst traffic
+
+ALL_TESTS="
+ ping_ipv4
+ test_8K
+ test_800
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=8
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+source qos_lib.sh
+source mlxsw_lib.sh
+
+_1KB=1000
+_500KB=$((500 * _1KB))
+_1MB=$((1000 * _1KB))
+
+# The failure mode that this specifically tests is exhaustion of the
+# descriptor buffer. The point is to produce a burst that the shared buffer
+# should be able to accommodate, but to produce it with small enough packets
+# that the machine runs out of descriptor buffer space with the default
+# configuration.
+#
+# The machine therefore needs to be able to produce line rate with packets
+# as small as possible, and at the same time have a large enough buffer
+# that, when filled with these small packets, it runs out of descriptors.
+# Spectrum-2 is very close, but cannot perform this test. Therefore use
+# Spectrum-3 as a minimum, and permit a larger burst size, and therefore
+# larger packets, to reduce spurious failures.
+#
+mlxsw_only_on_spectrum 3+ || exit
+
+BURST_SIZE=$((50000000))
+POOL_SIZE=$BURST_SIZE
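+
+# For a rough sense of scale with the sizes above: a 50 MB burst consists of
+# about 62500 packets at 800 bytes each, but only about 6250 packets at
+# 8000 bytes each, which is why the small-packet case is the one that can
+# exhaust the descriptor buffer.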
+
+h1_create()
+{
+ simple_if_init $h1
+ mtu_set $h1 10000
+
+ vlan_create $h1 111 v$h1 192.0.2.33/28
+ ip link set dev $h1.111 type vlan egress-qos-map 0:1
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 111
+
+ mtu_restore $h1
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ mtu_set $h2 10000
+ ethtool -s $h2 speed 1000 autoneg off
+
+ vlan_create $h2 111 v$h2 192.0.2.34/28
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 111
+
+ ethtool -s $h2 autoneg on
+ mtu_restore $h2
+ simple_if_fini $h2
+}
+
+h3_create()
+{
+ simple_if_init $h3
+ mtu_set $h3 10000
+
+ vlan_create $h3 111 v$h3 192.0.2.35/28
+}
+
+h3_destroy()
+{
+ vlan_destroy $h3 111
+
+ mtu_restore $h3
+ simple_if_fini $h3
+}
+
+switch_create()
+{
+ # pools
+ # -----
+
+ devlink_pool_size_thtype_save 0
+ devlink_pool_size_thtype_save 4
+ devlink_pool_size_thtype_save 1
+ devlink_pool_size_thtype_save 5
+ devlink_pool_size_thtype_save 2
+ devlink_pool_size_thtype_save 6
+
+ devlink_port_pool_th_save $swp1 1
+ devlink_port_pool_th_save $swp2 6
+ devlink_port_pool_th_save $swp3 5
+ devlink_port_pool_th_save $swp4 2
+ devlink_port_pool_th_save $swp5 2
+
+ devlink_tc_bind_pool_th_save $swp1 1 ingress
+ devlink_tc_bind_pool_th_save $swp2 1 egress
+ devlink_tc_bind_pool_th_save $swp3 1 egress
+ devlink_tc_bind_pool_th_save $swp4 1 ingress
+ devlink_tc_bind_pool_th_save $swp5 1 ingress
+
+ # Control traffic pools. Just reduce the size.
+ devlink_pool_size_thtype_set 0 dynamic $_500KB
+ devlink_pool_size_thtype_set 4 dynamic $_500KB
+
+ # Stream modeling pools.
+ devlink_pool_size_thtype_set 1 dynamic $_500KB
+ devlink_pool_size_thtype_set 5 dynamic $_500KB
+
+ # Burst soak pools.
+ devlink_pool_size_thtype_set 2 static $POOL_SIZE
+ devlink_pool_size_thtype_set 6 static $POOL_SIZE
+
+ # $swp1
+ # -----
+
+ ip link set dev $swp1 up
+ mtu_set $swp1 10000
+ vlan_create $swp1 111
+ ip link set dev $swp1.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp1 1 16
+ devlink_tc_bind_pool_th_set $swp1 1 ingress 1 16
+
+ # Configure qdisc...
+ tc qdisc replace dev $swp1 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ # ... so that we can assign prio1 traffic to PG1.
+ dcb buffer set dev $swp1 prio-buffer all:0 1:1
+
+ # $swp2
+ # -----
+
+ ip link set dev $swp2 up
+ mtu_set $swp2 10000
+ ethtool -s $swp2 speed 1000 autoneg off
+ vlan_create $swp2 111
+ ip link set dev $swp2.111 type vlan egress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp2 6 $POOL_SIZE
+ devlink_tc_bind_pool_th_set $swp2 1 egress 6 $POOL_SIZE
+
+ # prio 0->TC0 (band 7), 1->TC1 (band 6)
+ tc qdisc replace dev $swp2 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+
+ # $swp3
+ # -----
+
+ ip link set dev $swp3 up
+ mtu_set $swp3 10000
+ ethtool -s $swp3 speed 1000 autoneg off
+ vlan_create $swp3 111
+ ip link set dev $swp3.111 type vlan egress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp3 5 16
+ devlink_tc_bind_pool_th_set $swp3 1 egress 5 16
+
+ # prio 0->TC0 (band 7), 1->TC1 (band 6)
+ tc qdisc replace dev $swp3 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+
+ # $swp4
+ # -----
+
+ ip link set dev $swp4 up
+ mtu_set $swp4 10000
+ ethtool -s $swp4 speed 1000 autoneg off
+ vlan_create $swp4 111
+ ip link set dev $swp4.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp4 2 $POOL_SIZE
+ devlink_tc_bind_pool_th_set $swp4 1 ingress 2 $POOL_SIZE
+
+ # Configure qdisc...
+ tc qdisc replace dev $swp4 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ # ... so that we can assign prio1 traffic to PG1.
+ dcb buffer set dev $swp4 prio-buffer all:0 1:1
+
+ # $swp5
+ # -----
+
+ ip link set dev $swp5 up
+ mtu_set $swp5 10000
+ vlan_create $swp5 111
+ ip link set dev $swp5.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp5 2 $POOL_SIZE
+ devlink_tc_bind_pool_th_set $swp5 1 ingress 2 $POOL_SIZE
+
+ # Configure qdisc...
+ tc qdisc replace dev $swp5 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ # ... so that we can assign prio1 traffic to PG1.
+ dcb buffer set dev $swp5 prio-buffer all:0 1:1
+
+ # bridges
+ # -------
+
+ ip link add name br1 type bridge vlan_filtering 0
+ ip link set dev $swp1.111 master br1
+ ip link set dev $swp3.111 master br1
+ ip link set dev br1 up
+
+ ip link add name br2 type bridge vlan_filtering 0
+ ip link set dev $swp2.111 master br2
+ ip link set dev $swp4.111 master br2
+ ip link set dev $swp5.111 master br2
+ ip link set dev br2 up
+}
+
+switch_destroy()
+{
+ # Do this first so that we can reset the limits to values that are only
+ # valid for the original static / dynamic setting.
+ devlink_pool_size_thtype_restore 6
+ devlink_pool_size_thtype_restore 5
+ devlink_pool_size_thtype_restore 4
+ devlink_pool_size_thtype_restore 2
+ devlink_pool_size_thtype_restore 1
+ devlink_pool_size_thtype_restore 0
+
+ # bridges
+ # -------
+
+ ip link set dev br2 down
+ ip link set dev $swp5.111 nomaster
+ ip link set dev $swp4.111 nomaster
+ ip link set dev $swp2.111 nomaster
+ ip link del dev br2
+
+ ip link set dev br1 down
+ ip link set dev $swp3.111 nomaster
+ ip link set dev $swp1.111 nomaster
+ ip link del dev br1
+
+ # $swp5
+ # -----
+
+ dcb buffer set dev $swp5 prio-buffer all:0
+ tc qdisc del dev $swp5 root
+
+ devlink_tc_bind_pool_th_restore $swp5 1 ingress
+ devlink_port_pool_th_restore $swp5 2
+
+ vlan_destroy $swp5 111
+ mtu_restore $swp5
+ ip link set dev $swp5 down
+
+ # $swp4
+ # -----
+
+ dcb buffer set dev $swp4 prio-buffer all:0
+ tc qdisc del dev $swp4 root
+
+ devlink_tc_bind_pool_th_restore $swp4 1 ingress
+ devlink_port_pool_th_restore $swp4 2
+
+ vlan_destroy $swp4 111
+ ethtool -s $swp4 autoneg on
+ mtu_restore $swp4
+ ip link set dev $swp4 down
+
+ # $swp3
+ # -----
+
+ tc qdisc del dev $swp3 root
+
+ devlink_tc_bind_pool_th_restore $swp3 1 egress
+ devlink_port_pool_th_restore $swp3 5
+
+ vlan_destroy $swp3 111
+ ethtool -s $swp3 autoneg on
+ mtu_restore $swp3
+ ip link set dev $swp3 down
+
+ # $swp2
+ # -----
+
+ tc qdisc del dev $swp2 root
+
+ devlink_tc_bind_pool_th_restore $swp2 1 egress
+ devlink_port_pool_th_restore $swp2 6
+
+ vlan_destroy $swp2 111
+ ethtool -s $swp2 autoneg on
+ mtu_restore $swp2
+ ip link set dev $swp2 down
+
+ # $swp1
+ # -----
+
+ dcb buffer set dev $swp1 prio-buffer all:0
+ tc qdisc del dev $swp1 root
+
+ devlink_tc_bind_pool_th_restore $swp1 1 ingress
+ devlink_port_pool_th_restore $swp1 1
+
+ vlan_destroy $swp1 111
+ mtu_restore $swp1
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ swp4=${NETIFS[p6]}
+
+ swp5=${NETIFS[p7]}
+ h3=${NETIFS[p8]}
+
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.34 " h1->h2"
+ ping_test $h3 192.0.2.34 " h3->h2"
+}
+
+__test_qos_burst()
+{
+ local pktsize=$1; shift
+
+ RET=0
+
+ start_traffic_pktsize $pktsize $h1.111 192.0.2.33 192.0.2.34 $h2mac
+ sleep 1
+
+ local q0=$(ethtool_stats_get $swp2 tc_transmit_queue_tc_1)
+ ((q0 == 0))
+ check_err $? "Transmit queue non-zero?"
+
+ local d0=$(ethtool_stats_get $swp2 tc_no_buffer_discard_uc_tc_1)
+
+ local cell_size=$(devlink_cell_size_get)
+ local cells=$((BURST_SIZE / cell_size))
+ # Each packet is $pktsize of payload + headers.
+ local pkt_cells=$(((pktsize + 50 + cell_size - 1) / cell_size))
+ # How many packets can we admit:
+ local pkts=$((cells / pkt_cells))
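+	# For illustration, assuming a 144-byte cell (the real size is queried
+	# via devlink_cell_size_get above): an 800-byte packet plus headers
+	# takes ceil(850 / 144) = 6 cells, the burst is 50000000 / 144 = 347222
+	# cells, and so roughly 347222 / 6 = 57870 packets get sent.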
+
+ $MZ $h3 -p $pktsize -Q 1:111 -A 192.0.2.35 -B 192.0.2.34 \
+ -a own -b $h2mac -c $pkts -t udp -q
+ sleep 1
+
+ local d1=$(ethtool_stats_get $swp2 tc_no_buffer_discard_uc_tc_1)
+ ((d1 == d0))
+ check_err $? "Drops seen on egress port: $d0 -> $d1 ($((d1 - d0)))"
+
+	# Check that the queue is somewhat close to the burst size. This
+	# makes sure that the lack of drops above was not due to port
+	# undersubscription.
+ local q0=$(ethtool_stats_get $swp2 tc_transmit_queue_tc_1)
+ local qe=$((90 * BURST_SIZE / 100))
+ ((q0 > qe))
+ check_err $? "Queue size expected >$qe, got $q0"
+
+ stop_traffic
+ sleep 2
+
+ log_test "Burst: absorb $pkts ${pktsize}-B packets"
+}
+
+test_8K()
+{
+ __test_qos_burst 8000
+}
+
+test_800()
+{
+ __test_qos_burst 800
+}
+
+bail_on_lldpad
+
+trap cleanup EXIT
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
index f4493ef9cca1..3569ff45f7d5 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
@@ -371,9 +371,9 @@ test_tc_int_buf()
tc qdisc delete dev $swp root
}
-trap cleanup EXIT
-
bail_on_lldpad
+
+trap cleanup EXIT
setup_wait
tests_run
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
index 5d5622fc2758..f9858e221996 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
@@ -393,9 +393,9 @@ test_qos_pfc()
log_test "PFC"
}
-trap cleanup EXIT
-
bail_on_lldpad
+
+trap cleanup EXIT
setup_prepare
setup_wait
tests_run
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
index 1e5ad3209436..7a73057206cd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
@@ -166,12 +166,11 @@ ecn_mirror_test()
uninstall_qdisc
}
-trap cleanup EXIT
+bail_on_lldpad
+trap cleanup EXIT
setup_prepare
setup_wait
-
-bail_on_lldpad
tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
index d79a82f317d2..501d192529ac 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
@@ -73,12 +73,11 @@ red_mirror_test()
uninstall_qdisc
}
-trap cleanup EXIT
+bail_on_lldpad
+trap cleanup EXIT
setup_prepare
setup_wait
-
-bail_on_lldpad
tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/ocelot/basic_qos.sh b/tools/testing/selftests/drivers/net/ocelot/basic_qos.sh
new file mode 100755
index 000000000000..c51c83421c61
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/ocelot/basic_qos.sh
@@ -0,0 +1,253 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2022 NXP
+
+# The script is mostly generic, with the exception of the
+# ethtool per-TC counter names ("rx_green_prio_${tc}")
+
+WAIT_TIME=1
+NUM_NETIFS=4
+STABLE_MAC_ADDRS=yes
+NETIF_CREATE=no
+lib_dir=$(dirname $0)/../../../net/forwarding
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+require_command dcb
+
+h1=${NETIFS[p1]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p3]}
+h2=${NETIFS[p4]}
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h1_vlan_create()
+{
+ local vid=$1
+
+ vlan_create $h1 $vid
+ simple_if_init $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+ ip link set $h1.$vid type vlan \
+ egress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 \
+ ingress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+}
+
+h1_vlan_destroy()
+{
+ local vid=$1
+
+ simple_if_fini $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+ vlan_destroy $h1 $vid
+}
+
+h2_vlan_create()
+{
+ local vid=$1
+
+ vlan_create $h2 $vid
+ simple_if_init $h2.$vid $H2_IPV4/24 $H2_IPV6/64
+ ip link set $h2.$vid type vlan \
+ egress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 \
+ ingress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+}
+
+h2_vlan_destroy()
+{
+ local vid=$1
+
+ simple_if_fini $h2.$vid $H2_IPV4/24 $H2_IPV6/64
+ vlan_destroy $h2 $vid
+}
+
+vlans_prepare()
+{
+ h1_vlan_create 100
+ h2_vlan_create 100
+
+ tc qdisc add dev ${h1}.100 clsact
+ tc filter add dev ${h1}.100 egress protocol ipv4 \
+ flower ip_proto icmp action skbedit priority 3
+ tc filter add dev ${h1}.100 egress protocol ipv6 \
+ flower ip_proto icmpv6 action skbedit priority 3
+}
+
+vlans_destroy()
+{
+ tc qdisc del dev ${h1}.100 clsact
+
+ h1_vlan_destroy 100
+ h2_vlan_destroy 100
+}
+
+switch_create()
+{
+ ip link set ${swp1} up
+ ip link set ${swp2} up
+
+ # Ports should trust VLAN PCP even with vlan_filtering=0
+ ip link add br0 type bridge
+ ip link set ${swp1} master br0
+ ip link set ${swp2} master br0
+ ip link set br0 up
+}
+
+switch_destroy()
+{
+ ip link del br0
+}
+
+setup_prepare()
+{
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+ switch_destroy
+
+ vrf_cleanup
+}
+
+dscp_cs_to_tos()
+{
+ local dscp_cs=$1
+
+ # https://datatracker.ietf.org/doc/html/rfc2474
+ # 4.2.2.1 The Class Selector Codepoints
+ echo $((${dscp_cs} << 5))
+}
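+
+# For example, dscp_cs_to_tos 4 prints 128 (0x80): CS4 is DSCP 32, and DSCP
+# occupies the upper six bits of the TOS byte, hence the shift by 5 above.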
+
+run_test()
+{
+ local test_name=$1; shift
+ local if_name=$1; shift
+ local tc=$1; shift
+ local tos=$1; shift
+ local counter_name="rx_green_prio_${tc}"
+ local ipv4_before
+ local ipv4_after
+ local ipv6_before
+ local ipv6_after
+
+ ipv4_before=$(ethtool_stats_get ${swp1} "${counter_name}")
+ ping_do ${if_name} $H2_IPV4 "-Q ${tos}"
+ ipv4_after=$(ethtool_stats_get ${swp1} "${counter_name}")
+
+ if [ $((${ipv4_after} - ${ipv4_before})) -lt ${PING_COUNT} ]; then
+ RET=1
+ else
+ RET=0
+ fi
+ log_test "IPv4 ${test_name}"
+
+ ipv6_before=$(ethtool_stats_get ${swp1} "${counter_name}")
+ ping_do ${if_name} $H2_IPV6 "-Q ${tos}"
+ ipv6_after=$(ethtool_stats_get ${swp1} "${counter_name}")
+
+ if [ $((${ipv6_after} - ${ipv6_before})) -lt ${PING_COUNT} ]; then
+ RET=1
+ else
+ RET=0
+ fi
+ log_test "IPv6 ${test_name}"
+}
+
+port_default_prio_get()
+{
+ local if_name=$1
+ local prio
+
+ prio="$(dcb -j app show dev ${if_name} default-prio | \
+ jq '.default_prio[]')"
+ if [ -z "${prio}" ]; then
+ prio=0
+ fi
+
+ echo ${prio}
+}
+
+test_port_default()
+{
+ local orig=$(port_default_prio_get ${swp1})
+ local dmac=$(mac_get ${h2})
+
+ dcb app replace dev ${swp1} default-prio 5
+
+ run_test "Port-default QoS classification" ${h1} 5 0
+
+ dcb app replace dev ${swp1} default-prio ${orig}
+}
+
+test_vlan_pcp()
+{
+ vlans_prepare
+
+ run_test "Trusted VLAN PCP QoS classification" ${h1}.100 3 0
+
+ vlans_destroy
+}
+
+test_ip_dscp()
+{
+ local port_default=$(port_default_prio_get ${swp1})
+ local tos=$(dscp_cs_to_tos 4)
+
+ dcb app add dev ${swp1} dscp-prio CS4:4
+ run_test "Trusted DSCP QoS classification" ${h1} 4 ${tos}
+ dcb app del dev ${swp1} dscp-prio CS4:4
+
+ vlans_prepare
+ run_test "Untrusted DSCP QoS classification follows VLAN PCP" \
+ ${h1}.100 3 ${tos}
+ vlans_destroy
+
+ run_test "Untrusted DSCP QoS classification follows port default" \
+ ${h1} ${port_default} ${tos}
+}
+
+trap cleanup EXIT
+
+ALL_TESTS="
+ test_port_default
+ test_vlan_pcp
+ test_ip_dscp
+"
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/ocelot/psfp.sh b/tools/testing/selftests/drivers/net/ocelot/psfp.sh
new file mode 100755
index 000000000000..5a5cee92c665
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/ocelot/psfp.sh
@@ -0,0 +1,327 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2021-2022 NXP
+
+# Note: On LS1028A, which lacks enough user ports, this setup requires patching
+# the device tree to use the second CPU port as a user port.
+
+WAIT_TIME=1
+NUM_NETIFS=4
+STABLE_MAC_ADDRS=yes
+NETIF_CREATE=no
+lib_dir=$(dirname $0)/../../../net/forwarding
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+source $lib_dir/tsn_lib.sh
+
+UDS_ADDRESS_H1="/var/run/ptp4l_h1"
+UDS_ADDRESS_SWP1="/var/run/ptp4l_swp1"
+
+# Tunables
+NUM_PKTS=1000
+STREAM_VID=100
+STREAM_PRIO=6
+# Use a conservative cycle of 10 ms to allow the test to still pass when the
+# kernel has some extra overhead like lockdep etc
+CYCLE_TIME_NS=10000000
+# Create two Gate Control List entries, one OPEN and one CLOSE, of equal
+# durations
+GATE_DURATION_NS=$((${CYCLE_TIME_NS} / 2))
+# Give 2/3 of the cycle time to user space and 1/3 to the kernel
+FUDGE_FACTOR=$((${CYCLE_TIME_NS} / 3))
+# Shift the isochron base time by half the gate time, so that packets are
+# always received by swp1 close to the middle of the time slot, to minimize
+# inaccuracies due to network sync
+SHIFT_TIME_NS=$((${GATE_DURATION_NS} / 2))
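+# With the values above: 5 ms gates, a fudge factor of roughly 3.33 ms and a
+# 2.5 ms shift, so packets land close to the middle of each OPEN slot.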
+
+h1=${NETIFS[p1]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p3]}
+h2=${NETIFS[p4]}
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+# Chain number exported by the ocelot driver for
+# Per-Stream Filtering and Policing filters
+PSFP()
+{
+ echo 30000
+}
+
+psfp_chain_create()
+{
+ local if_name=$1
+
+ tc qdisc add dev $if_name clsact
+
+ tc filter add dev $if_name ingress chain 0 pref 49152 flower \
+ skip_sw action goto chain $(PSFP)
+}
+
+psfp_chain_destroy()
+{
+ local if_name=$1
+
+ tc qdisc del dev $if_name clsact
+}
+
+psfp_filter_check()
+{
+ local expected=$1
+ local packets=""
+ local drops=""
+ local stats=""
+
+ stats=$(tc -j -s filter show dev ${swp1} ingress chain $(PSFP) pref 1)
+ packets=$(echo ${stats} | jq ".[1].options.actions[].stats.packets")
+ drops=$(echo ${stats} | jq ".[1].options.actions[].stats.drops")
+
+ if ! [ "${packets}" = "${expected}" ]; then
+ printf "Expected filter to match on %d packets but matched on %d instead\n" \
+ "${expected}" "${packets}"
+ fi
+
+ echo "Hardware filter reports ${drops} drops"
+}
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+switch_create()
+{
+ local h2_mac_addr=$(mac_get $h2)
+
+ ip link set ${swp1} up
+ ip link set ${swp2} up
+
+ ip link add br0 type bridge vlan_filtering 1
+ ip link set ${swp1} master br0
+ ip link set ${swp2} master br0
+ ip link set br0 up
+
+ bridge vlan add dev ${swp2} vid ${STREAM_VID}
+ bridge vlan add dev ${swp1} vid ${STREAM_VID}
+ # PSFP on Ocelot requires the filter to also be added to the bridge
+ # FDB, and not be removed
+ bridge fdb add dev ${swp2} \
+ ${h2_mac_addr} vlan ${STREAM_VID} static master
+
+ psfp_chain_create ${swp1}
+
+ tc filter add dev ${swp1} ingress chain $(PSFP) pref 1 \
+ protocol 802.1Q flower skip_sw \
+ dst_mac ${h2_mac_addr} vlan_id ${STREAM_VID} \
+ action gate base-time 0.000000000 \
+ sched-entry OPEN ${GATE_DURATION_NS} -1 -1 \
+ sched-entry CLOSE ${GATE_DURATION_NS} -1 -1
+}
+
+switch_destroy()
+{
+ psfp_chain_destroy ${swp1}
+ ip link del br0
+}
+
+txtime_setup()
+{
+ local if_name=$1
+
+ tc qdisc add dev ${if_name} clsact
+ # Classify PTP on TC 7 and isochron on TC 6
+ tc filter add dev ${if_name} egress protocol 0x88f7 \
+ flower action skbedit priority 7
+ tc filter add dev ${if_name} egress protocol 802.1Q \
+ flower vlan_ethtype 0xdead action skbedit priority 6
+ tc qdisc add dev ${if_name} handle 100: parent root mqprio num_tc 8 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ map 0 1 2 3 4 5 6 7 \
+ hw 1
+ # Set up TC 6 for SO_TXTIME. tc-mqprio queues count from 1.
+ tc qdisc replace dev ${if_name} parent 100:$((${STREAM_PRIO} + 1)) etf \
+ clockid CLOCK_TAI offload delta ${FUDGE_FACTOR}
+}
+
+txtime_cleanup()
+{
+ local if_name=$1
+
+ tc qdisc del dev ${if_name} root
+ tc qdisc del dev ${if_name} clsact
+}
+
+setup_prepare()
+{
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+
+ txtime_setup ${h1}
+
+ # Set up swp1 as a master PHC for h1, synchronized to the local
+ # CLOCK_REALTIME.
+ phc2sys_start ${swp1} ${UDS_ADDRESS_SWP1}
+
+ # Assumption true for LS1028A: h1 and h2 use the same PHC. So by
+ # synchronizing h1 to swp1 via PTP, h2 is also implicitly synchronized
+ # to swp1 (and both to CLOCK_REALTIME).
+ ptp4l_start ${h1} true ${UDS_ADDRESS_H1}
+ ptp4l_start ${swp1} false ${UDS_ADDRESS_SWP1}
+
+ # Make sure there are no filter matches at the beginning of the test
+ psfp_filter_check 0
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ptp4l_stop ${swp1}
+ ptp4l_stop ${h1}
+ phc2sys_stop
+ isochron_recv_stop
+
+ txtime_cleanup ${h1}
+
+ h2_destroy
+ h1_destroy
+ switch_destroy
+
+ vrf_cleanup
+}
+
+debug_incorrectly_dropped_packets()
+{
+ local isochron_dat=$1
+ local dropped_seqids
+ local seqid
+
+ echo "Packets incorrectly dropped:"
+
+ dropped_seqids=$(isochron report \
+ --input-file "${isochron_dat}" \
+ --printf-format "%u RX hw %T\n" \
+ --printf-args "qR" | \
+ grep 'RX hw 0.000000000' | \
+ awk '{print $1}')
+
+ for seqid in ${dropped_seqids}; do
+ isochron report \
+ --input-file "${isochron_dat}" \
+ --start ${seqid} --stop ${seqid} \
+ --printf-format "seqid %u scheduled for %T, HW TX timestamp %T\n" \
+ --printf-args "qST"
+ done
+}
+
+debug_incorrectly_received_packets()
+{
+ local isochron_dat=$1
+
+ echo "Packets incorrectly received:"
+
+ isochron report \
+ --input-file "${isochron_dat}" \
+ --printf-format "seqid %u scheduled for %T, HW TX timestamp %T, HW RX timestamp %T\n" \
+ --printf-args "qSTR" |
+ grep -v 'HW RX timestamp 0.000000000'
+}
+
+run_test()
+{
+ local base_time=$1
+ local expected=$2
+ local test_name=$3
+ local debug=$4
+ local isochron_dat="$(mktemp)"
+ local extra_args=""
+ local received
+
+ isochron_do \
+ "${h1}" \
+ "${h2}" \
+ "${UDS_ADDRESS_H1}" \
+ "" \
+ "${base_time}" \
+ "${CYCLE_TIME_NS}" \
+ "${SHIFT_TIME_NS}" \
+ "${NUM_PKTS}" \
+ "${STREAM_VID}" \
+ "${STREAM_PRIO}" \
+ "" \
+ "${isochron_dat}"
+
+ # Count all received packets by looking at the non-zero RX timestamps
+ received=$(isochron report \
+ --input-file "${isochron_dat}" \
+ --printf-format "%u\n" --printf-args "R" | \
+ grep -w -v '0' | wc -l)
+
+ if [ "${received}" = "${expected}" ]; then
+ RET=0
+ else
+ RET=1
+ echo "Expected isochron to receive ${expected} packets but received ${received}"
+ fi
+
+ log_test "${test_name}"
+
+ if [ "$RET" = "1" ]; then
+ ${debug} "${isochron_dat}"
+ fi
+
+ rm ${isochron_dat} 2> /dev/null
+}
+
+test_gate_in_band()
+{
+ # Send packets in-band with the OPEN gate entry
+ run_test 0.000000000 ${NUM_PKTS} "In band" \
+ debug_incorrectly_dropped_packets
+
+ psfp_filter_check ${NUM_PKTS}
+}
+
+test_gate_out_of_band()
+{
+ # Send packets in-band with the CLOSE gate entry
+ run_test 0.005000000 0 "Out of band" \
+ debug_incorrectly_received_packets
+
+ psfp_filter_check $((2 * ${NUM_PKTS}))
+}
+
+trap cleanup EXIT
+
+ALL_TESTS="
+ test_gate_in_band
+ test_gate_out_of_band
+"
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
index 10e54bcca7a9..4401a654c2c0 100755
--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -215,15 +215,15 @@ test_vlan_pop()
sleep 1
- tcpdump_stop
+ tcpdump_stop $eth2
- if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, ethertype IPv4"; then
+ if tcpdump_show $eth2 | grep -q "$eth3_mac > $eth2_mac, ethertype IPv4"; then
echo "OK"
else
echo "FAIL"
fi
- tcpdump_cleanup
+ tcpdump_cleanup $eth2
}
test_vlan_push()
@@ -236,15 +236,15 @@ test_vlan_push()
sleep 1
- tcpdump_stop
+ tcpdump_stop $eth3.100
- if tcpdump_show | grep -q "$eth2_mac > $eth3_mac"; then
+ if tcpdump_show $eth3.100 | grep -q "$eth2_mac > $eth3_mac"; then
echo "OK"
else
echo "FAIL"
fi
- tcpdump_cleanup
+ tcpdump_cleanup $eth3.100
}
test_vlan_ingress_modify()
@@ -267,15 +267,15 @@ test_vlan_ingress_modify()
sleep 1
- tcpdump_stop
+ tcpdump_stop $eth2
- if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
+ if tcpdump_show $eth2 | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
echo "OK"
else
echo "FAIL"
fi
- tcpdump_cleanup
+ tcpdump_cleanup $eth2
tc filter del dev $eth0 ingress chain $(IS1 2) pref 3
@@ -305,15 +305,15 @@ test_vlan_egress_modify()
sleep 1
- tcpdump_stop
+ tcpdump_stop $eth2
- if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
+ if tcpdump_show $eth2 | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
echo "OK"
else
echo "FAIL"
fi
- tcpdump_cleanup
+ tcpdump_cleanup $eth2
tc filter del dev $eth1 egress chain $(ES0) pref 3
tc qdisc del dev $eth1 clsact
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 0f2ebc38d893..0fbdacfdcd6a 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -36,6 +36,7 @@ TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
+TEST_PROGS += ndisc_unsolicited_na_test.sh
TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index 4f70baad867d..bbe3b379927a 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -20,6 +20,7 @@ SRC_IP6=2001:db8:1::3
DEV_ADDR=192.51.100.1
DEV_ADDR6=2001:db8:1::1
DEV=dummy0
+TESTS="fib_rule6 fib_rule4"
log_test()
{
@@ -316,7 +317,16 @@ fi
# start clean
cleanup &> /dev/null
setup
-run_fibrule_tests
+for t in $TESTS
+do
+ case $t in
+ fib_rule6_test|fib_rule6) fib_rule6_test;;
+ fib_rule4_test|fib_rule4) fib_rule4_test;;
+
+ help) echo "Test names: $TESTS"; exit 0;;
+
+ esac
+done
cleanup
if [ "$TESTS" != "none" ]; then
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index c87e674b61b1..0912f5ae7f6b 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -2,6 +2,7 @@
TEST_PROGS = bridge_igmp.sh \
bridge_locked_port.sh \
+ bridge_mdb.sh \
bridge_mld.sh \
bridge_port_isolation.sh \
bridge_sticky_fdb.sh \
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
new file mode 100755
index 000000000000..b1ba6876dd86
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Verify that adding host mdb entries works as intended for all types of
+# multicast filters: ipv4, ipv6, and mac
+
+ALL_TESTS="mdb_add_del_test"
+NUM_NETIFS=2
+
+TEST_GROUP_IP4="225.1.2.3"
+TEST_GROUP_IP6="ff02::42"
+TEST_GROUP_MAC="01:00:01:c0:ff:ee"
+
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+switch_create()
+{
+ # Enable multicast filtering
+ ip link add dev br0 type bridge mcast_snooping 1
+
+ ip link set dev $swp1 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+}
+
+switch_destroy()
+{
+ ip link set dev $swp1 down
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ vrf_prepare
+
+ h1_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+do_mdb_add_del()
+{
+ local group=$1
+ local flag=$2
+
+ RET=0
+ bridge mdb add dev br0 port br0 grp $group $flag 2>/dev/null
+ check_err $? "Failed adding $group to br0, port br0"
+
+ if [ -z "$flag" ]; then
+ flag="temp"
+ fi
+
+ bridge mdb show dev br0 | grep $group | grep -q $flag 2>/dev/null
+ check_err $? "$group not added with $flag flag"
+
+ bridge mdb del dev br0 port br0 grp $group 2>/dev/null
+ check_err $? "Failed deleting $group from br0, port br0"
+
+ bridge mdb show dev br0 | grep -q $group >/dev/null
+ check_err_fail 1 $? "$group still in mdb after delete"
+
+ log_test "MDB add/del group $group to bridge port br0"
+}
+
+mdb_add_del_test()
+{
+ do_mdb_add_del $TEST_GROUP_MAC permanent
+ do_mdb_add_del $TEST_GROUP_IP4
+ do_mdb_add_del $TEST_GROUP_IP6
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 664b9ecaf228..66681a2bcdd3 100644..100755
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -27,6 +27,9 @@ INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600}
LOW_AGEING_TIME=${LOW_AGEING_TIME:=1000}
REQUIRE_JQ=${REQUIRE_JQ:=yes}
REQUIRE_MZ=${REQUIRE_MZ:=yes}
+REQUIRE_MTOOLS=${REQUIRE_MTOOLS:=no}
+STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no}
+TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -159,6 +162,12 @@ fi
if [[ "$REQUIRE_MZ" = "yes" ]]; then
require_command $MZ
fi
+if [[ "$REQUIRE_MTOOLS" = "yes" ]]; then
+ # https://github.com/vladimiroltean/mtools/
+ # patched for IPv6 support
+ require_command msend
+ require_command mreceive
+fi
if [[ ! -v NUM_NETIFS ]]; then
echo "SKIP: importer does not define \"NUM_NETIFS\""
@@ -214,10 +223,41 @@ create_netif()
esac
}
+declare -A MAC_ADDR_ORIG
+mac_addr_prepare()
+{
+ local new_addr=
+ local dev=
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ dev=${NETIFS[p$i]}
+ new_addr=$(printf "00:01:02:03:04:%02x" $i)
+
+ MAC_ADDR_ORIG["$dev"]=$(ip -j link show dev $dev | jq -e '.[].address')
+ # Strip quotes
+ MAC_ADDR_ORIG["$dev"]=${MAC_ADDR_ORIG["$dev"]//\"/}
+ ip link set dev $dev address $new_addr
+ done
+}
+
+mac_addr_restore()
+{
+ local dev=
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ dev=${NETIFS[p$i]}
+ ip link set dev $dev address ${MAC_ADDR_ORIG["$dev"]}
+ done
+}
+
if [[ "$NETIF_CREATE" = "yes" ]]; then
create_netif
fi
+if [[ "$STABLE_MAC_ADDRS" = "yes" ]]; then
+ mac_addr_prepare
+fi
+
for ((i = 1; i <= NUM_NETIFS; ++i)); do
ip link show dev ${NETIFS[p$i]} &> /dev/null
if [[ $? -ne 0 ]]; then
@@ -503,6 +543,10 @@ pre_cleanup()
echo "Pausing before cleanup, hit any key to continue"
read
fi
+
+ if [[ "$STABLE_MAC_ADDRS" = "yes" ]]; then
+ mac_addr_restore
+ fi
}
vrf_prepare()
@@ -824,6 +868,15 @@ mac_get()
ip -j link show dev $if_name | jq -r '.[]["address"]'
}
+ipv6_lladdr_get()
+{
+ local if_name=$1
+
+ ip -j addr show dev $if_name | \
+ jq -r '.[]["addr_info"][] | select(.scope == "link").local' | \
+ head -1
+}
+
bridge_ageing_time_get()
{
local bridge=$1
@@ -1322,25 +1375,40 @@ flood_test()
__start_traffic()
{
+ local pktsize=$1; shift
local proto=$1; shift
local h_in=$1; shift # Where the traffic egresses the host
local sip=$1; shift
local dip=$1; shift
local dmac=$1; shift
- $MZ $h_in -p 8000 -A $sip -B $dip -c 0 \
+ $MZ $h_in -p $pktsize -A $sip -B $dip -c 0 \
-a own -b $dmac -t "$proto" -q "$@" &
sleep 1
}
+start_traffic_pktsize()
+{
+ local pktsize=$1; shift
+
+ __start_traffic $pktsize udp "$@"
+}
+
+start_tcp_traffic_pktsize()
+{
+ local pktsize=$1; shift
+
+ __start_traffic $pktsize tcp "$@"
+}
+
start_traffic()
{
- __start_traffic udp "$@"
+ start_traffic_pktsize 8000 "$@"
}
start_tcp_traffic()
{
- __start_traffic tcp "$@"
+ start_tcp_traffic_pktsize 8000 "$@"
}
stop_traffic()
@@ -1349,13 +1417,17 @@ stop_traffic()
{ kill %% && wait %%; } 2>/dev/null
}
+declare -A cappid
+declare -A capfile
+declare -A capout
+
tcpdump_start()
{
local if_name=$1; shift
local ns=$1; shift
- capfile=$(mktemp)
- capout=$(mktemp)
+ capfile[$if_name]=$(mktemp)
+ capout[$if_name]=$(mktemp)
if [ -z $ns ]; then
ns_cmd=""
@@ -1369,27 +1441,35 @@ tcpdump_start()
capuser="-Z $SUDO_USER"
fi
- $ns_cmd tcpdump -e -n -Q in -i $if_name \
- -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
- cappid=$!
+ $ns_cmd tcpdump $TCPDUMP_EXTRA_FLAGS -e -n -Q in -i $if_name \
+ -s 65535 -B 32768 $capuser -w ${capfile[$if_name]} \
+ > "${capout[$if_name]}" 2>&1 &
+ cappid[$if_name]=$!
sleep 1
}
tcpdump_stop()
{
- $ns_cmd kill $cappid
+ local if_name=$1
+ local pid=${cappid[$if_name]}
+
+ $ns_cmd kill "$pid" && wait "$pid"
sleep 1
}
tcpdump_cleanup()
{
- rm $capfile $capout
+ local if_name=$1
+
+ rm ${capfile[$if_name]} ${capout[$if_name]}
}
tcpdump_show()
{
- tcpdump -e -n -r $capfile 2>&1
+ local if_name=$1
+
+ tcpdump -e -n -r ${capfile[$if_name]} 2>&1
}
# return 0 if the packet wasn't seen on host2_if or 1 if it was
@@ -1499,6 +1579,37 @@ brmcast_check_sg_state()
done
}
+mc_join()
+{
+ local if_name=$1
+ local group=$2
+ local vrf_name=$(master_name_get $if_name)
+
+ # We don't care about actual reception, just about joining the
+ # IP multicast group and adding the L2 address to the device's
+ # MAC filtering table
+ ip vrf exec $vrf_name \
+ mreceive -g $group -I $if_name > /dev/null 2>&1 &
+ mreceive_pid=$!
+
+ sleep 1
+}
+
+mc_leave()
+{
+ kill "$mreceive_pid" && wait "$mreceive_pid"
+}
+
+mc_send()
+{
+ local if_name=$1
+ local groups=$2
+ local vrf_name=$(master_name_get $if_name)
+
+ ip vrf exec $vrf_name \
+ msend -g $groups -I $if_name -c 1 > /dev/null 2>&1
+}
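+
+# Typical usage (local_termination.sh shows a complete example):
+#   mc_join $h2 225.1.2.3
+#   mc_send $h1 225.1.2.3
+#   mc_leave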
+
start_ip_monitor()
{
local mtype=$1; shift
diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
new file mode 100755
index 000000000000..c5b0cbc85b3e
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/local_termination.sh
@@ -0,0 +1,299 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="standalone bridge"
+NUM_NETIFS=2
+PING_COUNT=1
+REQUIRE_MTOOLS=yes
+REQUIRE_MZ=no
+
+source lib.sh
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+BRIDGE_ADDR="00:00:de:ad:be:ee"
+MACVLAN_ADDR="00:00:de:ad:be:ef"
+UNKNOWN_UC_ADDR1="de:ad:be:ef:ee:03"
+UNKNOWN_UC_ADDR2="de:ad:be:ef:ee:04"
+UNKNOWN_UC_ADDR3="de:ad:be:ef:ee:05"
+JOINED_IPV4_MC_ADDR="225.1.2.3"
+UNKNOWN_IPV4_MC_ADDR1="225.1.2.4"
+UNKNOWN_IPV4_MC_ADDR2="225.1.2.5"
+UNKNOWN_IPV4_MC_ADDR3="225.1.2.6"
+JOINED_IPV6_MC_ADDR="ff2e::0102:0304"
+UNKNOWN_IPV6_MC_ADDR1="ff2e::0102:0305"
+UNKNOWN_IPV6_MC_ADDR2="ff2e::0102:0306"
+UNKNOWN_IPV6_MC_ADDR3="ff2e::0102:0307"
+
+JOINED_MACV4_MC_ADDR="01:00:5e:01:02:03"
+UNKNOWN_MACV4_MC_ADDR1="01:00:5e:01:02:04"
+UNKNOWN_MACV4_MC_ADDR2="01:00:5e:01:02:05"
+UNKNOWN_MACV4_MC_ADDR3="01:00:5e:01:02:06"
+JOINED_MACV6_MC_ADDR="33:33:01:02:03:04"
+UNKNOWN_MACV6_MC_ADDR1="33:33:01:02:03:05"
+UNKNOWN_MACV6_MC_ADDR2="33:33:01:02:03:06"
+UNKNOWN_MACV6_MC_ADDR3="33:33:01:02:03:07"
+
+NON_IP_MC="01:02:03:04:05:06"
+NON_IP_PKT="00:04 48:45:4c:4f"
+BC="ff:ff:ff:ff:ff:ff"
+
+# Disable promisc to ensure we don't receive unknown MAC DA packets
+export TCPDUMP_EXTRA_FLAGS="-pl"
+
+h1=${NETIFS[p1]}
+h2=${NETIFS[p2]}
+
+send_non_ip()
+{
+ local if_name=$1
+ local smac=$2
+ local dmac=$3
+
+ $MZ -q $if_name "$dmac $smac $NON_IP_PKT"
+}
+
+send_uc_ipv4()
+{
+ local if_name=$1
+ local dmac=$2
+
+ ip neigh add $H2_IPV4 lladdr $dmac dev $if_name
+ ping_do $if_name $H2_IPV4
+ ip neigh del $H2_IPV4 dev $if_name
+}
+
+check_rcv()
+{
+ local if_name=$1
+ local type=$2
+ local pattern=$3
+ local should_receive=$4
+ local should_fail=
+
+ [ $should_receive = true ] && should_fail=0 || should_fail=1
+ RET=0
+
+ tcpdump_show $if_name | grep -q "$pattern"
+
+ check_err_fail "$should_fail" "$?" "reception"
+
+ log_test "$if_name: $type"
+}
+
+mc_route_prepare()
+{
+ local if_name=$1
+ local vrf_name=$(master_name_get $if_name)
+
+ ip route add 225.100.1.0/24 dev $if_name vrf $vrf_name
+ ip -6 route add ff2e::/64 dev $if_name vrf $vrf_name
+}
+
+mc_route_destroy()
+{
+ local if_name=$1
+ local vrf_name=$(master_name_get $if_name)
+
+ ip route del 225.100.1.0/24 dev $if_name vrf $vrf_name
+ ip -6 route del ff2e::/64 dev $if_name vrf $vrf_name
+}
+
+run_test()
+{
+ local rcv_if_name=$1
+ local smac=$(mac_get $h1)
+ local rcv_dmac=$(mac_get $rcv_if_name)
+
+ tcpdump_start $rcv_if_name
+
+ mc_route_prepare $h1
+ mc_route_prepare $rcv_if_name
+
+ send_uc_ipv4 $h1 $rcv_dmac
+ send_uc_ipv4 $h1 $MACVLAN_ADDR
+ send_uc_ipv4 $h1 $UNKNOWN_UC_ADDR1
+
+ ip link set dev $rcv_if_name promisc on
+ send_uc_ipv4 $h1 $UNKNOWN_UC_ADDR2
+ mc_send $h1 $UNKNOWN_IPV4_MC_ADDR2
+ mc_send $h1 $UNKNOWN_IPV6_MC_ADDR2
+ ip link set dev $rcv_if_name promisc off
+
+ mc_join $rcv_if_name $JOINED_IPV4_MC_ADDR
+ mc_send $h1 $JOINED_IPV4_MC_ADDR
+ mc_leave
+
+ mc_join $rcv_if_name $JOINED_IPV6_MC_ADDR
+ mc_send $h1 $JOINED_IPV6_MC_ADDR
+ mc_leave
+
+ mc_send $h1 $UNKNOWN_IPV4_MC_ADDR1
+ mc_send $h1 $UNKNOWN_IPV6_MC_ADDR1
+
+ ip link set dev $rcv_if_name allmulticast on
+ send_uc_ipv4 $h1 $UNKNOWN_UC_ADDR3
+ mc_send $h1 $UNKNOWN_IPV4_MC_ADDR3
+ mc_send $h1 $UNKNOWN_IPV6_MC_ADDR3
+ ip link set dev $rcv_if_name allmulticast off
+
+ mc_route_destroy $rcv_if_name
+ mc_route_destroy $h1
+
+ sleep 1
+
+ tcpdump_stop $rcv_if_name
+
+ check_rcv $rcv_if_name "Unicast IPv4 to primary MAC address" \
+ "$smac > $rcv_dmac, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Unicast IPv4 to macvlan MAC address" \
+ "$smac > $MACVLAN_ADDR, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Unicast IPv4 to unknown MAC address" \
+ "$smac > $UNKNOWN_UC_ADDR1, ethertype IPv4 (0x0800)" \
+ false
+
+ check_rcv $rcv_if_name "Unicast IPv4 to unknown MAC address, promisc" \
+ "$smac > $UNKNOWN_UC_ADDR2, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Unicast IPv4 to unknown MAC address, allmulti" \
+ "$smac > $UNKNOWN_UC_ADDR3, ethertype IPv4 (0x0800)" \
+ false
+
+ check_rcv $rcv_if_name "Multicast IPv4 to joined group" \
+ "$smac > $JOINED_MACV4_MC_ADDR, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv4 to unknown group" \
+ "$smac > $UNKNOWN_MACV4_MC_ADDR1, ethertype IPv4 (0x0800)" \
+ false
+
+ check_rcv $rcv_if_name "Multicast IPv4 to unknown group, promisc" \
+ "$smac > $UNKNOWN_MACV4_MC_ADDR2, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv4 to unknown group, allmulti" \
+ "$smac > $UNKNOWN_MACV4_MC_ADDR3, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv6 to joined group" \
+ "$smac > $JOINED_MACV6_MC_ADDR, ethertype IPv6 (0x86dd)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv6 to unknown group" \
+ "$smac > $UNKNOWN_MACV6_MC_ADDR1, ethertype IPv6 (0x86dd)" \
+ false
+
+ check_rcv $rcv_if_name "Multicast IPv6 to unknown group, promisc" \
+ "$smac > $UNKNOWN_MACV6_MC_ADDR2, ethertype IPv6 (0x86dd)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv6 to unknown group, allmulti" \
+ "$smac > $UNKNOWN_MACV6_MC_ADDR3, ethertype IPv6 (0x86dd)" \
+ true
+
+ tcpdump_cleanup $rcv_if_name
+}
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+bridge_create()
+{
+ ip link add br0 type bridge
+ ip link set br0 address $BRIDGE_ADDR
+ ip link set br0 up
+
+ ip link set $h2 master br0
+ ip link set $h2 up
+
+ simple_if_init br0 $H2_IPV4/24 $H2_IPV6/64
+}
+
+bridge_destroy()
+{
+ simple_if_fini br0 $H2_IPV4/24 $H2_IPV6/64
+
+ ip link del br0
+}
+
+standalone()
+{
+ h1_create
+ h2_create
+
+ ip link add link $h2 name macvlan0 type macvlan mode private
+ ip link set macvlan0 address $MACVLAN_ADDR
+ ip link set macvlan0 up
+
+ run_test $h2
+
+ ip link del macvlan0
+
+ h2_destroy
+ h1_destroy
+}
+
+bridge()
+{
+ h1_create
+ bridge_create
+
+ ip link add link br0 name macvlan0 type macvlan mode private
+ ip link set macvlan0 address $MACVLAN_ADDR
+ ip link set macvlan0 up
+
+ run_test br0
+
+ ip link del macvlan0
+
+ bridge_destroy
+ h1_destroy
+}
+
+cleanup()
+{
+ pre_cleanup
+ vrf_cleanup
+}
+
+setup_prepare()
+{
+ vrf_prepare
+ # setup_wait() needs this
+ ip link set $h1 up
+ ip link set $h2 up
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/no_forwarding.sh b/tools/testing/selftests/net/forwarding/no_forwarding.sh
new file mode 100755
index 000000000000..af3b398d13f0
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/no_forwarding.sh
@@ -0,0 +1,261 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="standalone two_bridges one_bridge_two_pvids"
+NUM_NETIFS=4
+
+source lib.sh
+
+h1=${NETIFS[p1]}
+h2=${NETIFS[p3]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p4]}
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+IPV4_ALLNODES="224.0.0.1"
+IPV6_ALLNODES="ff02::1"
+MACV4_ALLNODES="01:00:5e:00:00:01"
+MACV6_ALLNODES="33:33:00:00:00:01"
+NON_IP_MC="01:02:03:04:05:06"
+NON_IP_PKT="00:04 48:45:4c:4f"
+BC="ff:ff:ff:ff:ff:ff"
+
+# The full 4K VLAN space is too much to check, so strategically pick some
+# values which should provide reasonable coverage
+vids=(0 1 2 5 10 20 50 100 200 500 1000 2000 4000 4094)
+
+send_non_ip()
+{
+ local if_name=$1
+ local smac=$2
+ local dmac=$3
+
+ $MZ -q $if_name "$dmac $smac $NON_IP_PKT"
+}
+
+send_uc_ipv4()
+{
+ local if_name=$1
+ local dmac=$2
+
+ ip neigh add $H2_IPV4 lladdr $dmac dev $if_name
+ ping_do $if_name $H2_IPV4
+ ip neigh del $H2_IPV4 dev $if_name
+}
+
+send_mc_ipv4()
+{
+ local if_name=$1
+
+ ping_do $if_name $IPV4_ALLNODES "-I $if_name"
+}
+
+send_uc_ipv6()
+{
+ local if_name=$1
+ local dmac=$2
+
+ ip -6 neigh add $H2_IPV6 lladdr $dmac dev $if_name
+ ping6_do $if_name $H2_IPV6
+ ip -6 neigh del $H2_IPV6 dev $if_name
+}
+
+send_mc_ipv6()
+{
+ local if_name=$1
+
+ ping6_do $if_name $IPV6_ALLNODES%$if_name
+}
+
+check_rcv()
+{
+ local if_name=$1
+ local type=$2
+ local pattern=$3
+ local should_fail=1
+
+ RET=0
+
+ tcpdump_show $if_name | grep -q "$pattern"
+
+ check_err_fail "$should_fail" "$?" "reception"
+
+ log_test "$type"
+}
+
+run_test()
+{
+ local test_name="$1"
+ local smac=$(mac_get $h1)
+ local dmac=$(mac_get $h2)
+ local h1_ipv6_lladdr=$(ipv6_lladdr_get $h1)
+ local vid=
+
+ echo "$test_name: Sending packets"
+
+ tcpdump_start $h2
+
+ send_non_ip $h1 $smac $dmac
+ send_non_ip $h1 $smac $NON_IP_MC
+ send_non_ip $h1 $smac $BC
+ send_uc_ipv4 $h1 $dmac
+ send_mc_ipv4 $h1
+ send_uc_ipv6 $h1 $dmac
+ send_mc_ipv6 $h1
+
+ for vid in "${vids[@]}"; do
+ vlan_create $h1 $vid
+ simple_if_init $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+
+ send_non_ip $h1.$vid $smac $dmac
+ send_non_ip $h1.$vid $smac $NON_IP_MC
+ send_non_ip $h1.$vid $smac $BC
+ send_uc_ipv4 $h1.$vid $dmac
+ send_mc_ipv4 $h1.$vid
+ send_uc_ipv6 $h1.$vid $dmac
+ send_mc_ipv6 $h1.$vid
+
+ simple_if_fini $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+ vlan_destroy $h1 $vid
+ done
+
+ sleep 1
+
+ echo "$test_name: Checking which packets were received"
+
+ tcpdump_stop $h2
+
+ check_rcv $h2 "$test_name: Unicast non-IP untagged" \
+ "$smac > $dmac, 802.3, length 4:"
+
+ check_rcv $h2 "$test_name: Multicast non-IP untagged" \
+ "$smac > $NON_IP_MC, 802.3, length 4:"
+
+ check_rcv $h2 "$test_name: Broadcast non-IP untagged" \
+ "$smac > $BC, 802.3, length 4:"
+
+ check_rcv $h2 "$test_name: Unicast IPv4 untagged" \
+ "$smac > $dmac, ethertype IPv4 (0x0800)"
+
+ check_rcv $h2 "$test_name: Multicast IPv4 untagged" \
+ "$smac > $MACV4_ALLNODES, ethertype IPv4 (0x0800).*: $H1_IPV4 > $IPV4_ALLNODES"
+
+ check_rcv $h2 "$test_name: Unicast IPv6 untagged" \
+ "$smac > $dmac, ethertype IPv6 (0x86dd).*8: $H1_IPV6 > $H2_IPV6"
+
+ check_rcv $h2 "$test_name: Multicast IPv6 untagged" \
+ "$smac > $MACV6_ALLNODES, ethertype IPv6 (0x86dd).*: $h1_ipv6_lladdr > $IPV6_ALLNODES"
+
+ for vid in "${vids[@]}"; do
+ check_rcv $h2 "$test_name: Unicast non-IP VID $vid" \
+ "$smac > $dmac, ethertype 802.1Q (0x8100).*vlan $vid,.*length 4"
+
+ check_rcv $h2 "$test_name: Multicast non-IP VID $vid" \
+ "$smac > $NON_IP_MC, ethertype 802.1Q (0x8100).*vlan $vid,.*length 4"
+
+ check_rcv $h2 "$test_name: Broadcast non-IP VID $vid" \
+ "$smac > $BC, ethertype 802.1Q (0x8100).*vlan $vid,.*length 4"
+
+ check_rcv $h2 "$test_name: Unicast IPv4 VID $vid" \
+ "$smac > $dmac, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv4 (0x0800), $H1_IPV4 > $H2_IPV4"
+
+ check_rcv $h2 "$test_name: Multicast IPv4 VID $vid" \
+ "$smac > $MACV4_ALLNODES, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv4 (0x0800), $H1_IPV4 > $IPV4_ALLNODES"
+
+ check_rcv $h2 "$test_name: Unicast IPv6 VID $vid" \
+ "$smac > $dmac, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv6 (0x86dd), $H1_IPV6 > $H2_IPV6"
+
+ check_rcv $h2 "$test_name: Multicast IPv6 VID $vid" \
+ "$smac > $MACV6_ALLNODES, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv6 (0x86dd), $h1_ipv6_lladdr > $IPV6_ALLNODES"
+ done
+
+ tcpdump_cleanup $h2
+}
+
+standalone()
+{
+ run_test "Standalone switch ports"
+}
+
+two_bridges()
+{
+ ip link add br0 type bridge && ip link set br0 up
+ ip link add br1 type bridge && ip link set br1 up
+ ip link set $swp1 master br0
+ ip link set $swp2 master br1
+
+ run_test "Switch ports in different bridges"
+
+ ip link del br1
+ ip link del br0
+}
+
+one_bridge_two_pvids()
+{
+ ip link add br0 type bridge vlan_filtering 1 vlan_default_pvid 0
+ ip link set br0 up
+ ip link set $swp1 master br0
+ ip link set $swp2 master br0
+
+ bridge vlan add dev $swp1 vid 1 pvid untagged
+ bridge vlan add dev $swp1 vid 2 pvid untagged
+
+ run_test "Switch ports in VLAN-aware bridge with different PVIDs"
+
+ ip link del br0
+}
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+setup_prepare()
+{
+ vrf_prepare
+
+ h1_create
+ h2_create
+ # we call simple_if_init from the test itself, but setup_wait expects
+ # that we call it from here, and waits until the interfaces are up
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router.sh b/tools/testing/selftests/net/forwarding/router.sh
index 057f91b05098..b98ea9449b8b 100755
--- a/tools/testing/selftests/net/forwarding/router.sh
+++ b/tools/testing/selftests/net/forwarding/router.sh
@@ -1,6 +1,24 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# +--------------------+ +----------------------+
+# | H1 | | H2 |
+# | | | |
+# | $h1 + | | + $h2 |
+# | 192.0.2.2/24 | | | | 198.51.100.2/24 |
+# | 2001:db8:1::2/64 | | | | 2001:db8:2::2/64 |
+# | | | | | |
+# +------------------|-+ +-|--------------------+
+# | |
+# +------------------|-------------------------|--------------------+
+# | SW | | |
+# | | | |
+# | $rp1 + + $rp2 |
+# | 192.0.2.1/24 198.51.100.1/24 |
+# | 2001:db8:1::1/64 2001:db8:2::1/64 |
+# | |
+# +-----------------------------------------------------------------+
+
ALL_TESTS="
ping_ipv4
ping_ipv6
diff --git a/tools/testing/selftests/net/forwarding/router_vid_1.sh b/tools/testing/selftests/net/forwarding/router_vid_1.sh
index a7306c7ac06d..865c9f7d8143 100755
--- a/tools/testing/selftests/net/forwarding/router_vid_1.sh
+++ b/tools/testing/selftests/net/forwarding/router_vid_1.sh
@@ -1,7 +1,32 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="ping_ipv4 ping_ipv6"
+# +--------------------+ +----------------------+
+# | H1 | | H2 |
+# | | | |
+# | $h1.1 + | | + $h2.1 |
+# | 192.0.2.2/24 | | | | 198.51.100.2/24 |
+# | 2001:db8:1::2/64 | | | | 2001:db8:2::2/64 |
+# | | | | | |
+# | $h1 + | | + $h2 |
+# | | | | | |
+# +------------------|-+ +-|--------------------+
+# | |
+# +------------------|-------------------------|--------------------+
+# | SW | | |
+# | | | |
+# | $rp1 + + $rp2 |
+# | | | |
+# | $rp1.1 + + $rp2.1 |
+# | 192.0.2.1/24 198.51.100.1/24 |
+# | 2001:db8:1::1/64 2001:db8:2::1/64 |
+# | |
+# +-----------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+"
NUM_NETIFS=4
source lib.sh
diff --git a/tools/testing/selftests/net/forwarding/tsn_lib.sh b/tools/testing/selftests/net/forwarding/tsn_lib.sh
new file mode 100644
index 000000000000..60a1423e8116
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/tsn_lib.sh
@@ -0,0 +1,235 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2021-2022 NXP
+
+REQUIRE_ISOCHRON=${REQUIRE_ISOCHRON:=yes}
+REQUIRE_LINUXPTP=${REQUIRE_LINUXPTP:=yes}
+
+# Tunables
+UTC_TAI_OFFSET=37
+ISOCHRON_CPU=1
+
+if [[ "$REQUIRE_ISOCHRON" = "yes" ]]; then
+ # https://github.com/vladimiroltean/tsn-scripts
+ # WARNING: isochron versions pre-1.0 are unstable,
+ # always use the latest version
+ require_command isochron
+fi
+if [[ "$REQUIRE_LINUXPTP" = "yes" ]]; then
+ require_command phc2sys
+ require_command ptp4l
+fi
+
+phc2sys_start()
+{
+ local if_name=$1
+ local uds_address=$2
+ local extra_args=""
+
+ if ! [ -z "${uds_address}" ]; then
+ extra_args="${extra_args} -z ${uds_address}"
+ fi
+
+ phc2sys_log="$(mktemp)"
+
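+	# Discipline the PHC of ${if_name} (-c) using CLOCK_REALTIME as the
+	# time source (-s), compensating for the UTC/TAI offset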
+ chrt -f 10 phc2sys -m \
+ -c ${if_name} \
+ -s CLOCK_REALTIME \
+ -O ${UTC_TAI_OFFSET} \
+ --step_threshold 0.00002 \
+ --first_step_threshold 0.00002 \
+ ${extra_args} \
+ > "${phc2sys_log}" 2>&1 &
+ phc2sys_pid=$!
+
+ echo "phc2sys logs to ${phc2sys_log} and has pid ${phc2sys_pid}"
+
+ sleep 1
+}
+
+phc2sys_stop()
+{
+ { kill ${phc2sys_pid} && wait ${phc2sys_pid}; } 2> /dev/null
+ rm "${phc2sys_log}" 2> /dev/null
+}
+
+ptp4l_start()
+{
+ local if_name=$1
+ local slave_only=$2
+ local uds_address=$3
+ local log="ptp4l_log_${if_name}"
+ local pid="ptp4l_pid_${if_name}"
+ local extra_args=""
+
+ if [ "${slave_only}" = true ]; then
+ extra_args="${extra_args} -s"
+ fi
+
+ # declare dynamic variables ptp4l_log_${if_name} and ptp4l_pid_${if_name}
+ # as global, so that they can be referenced later
+ declare -g "${log}=$(mktemp)"
+
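+	# Run ptp4l on ${if_name} using the IEEE 802.3 transport (-2) and the
+	# peer-to-peer delay mechanism (-P)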
+ chrt -f 10 ptp4l -m -2 -P \
+ -i ${if_name} \
+ --step_threshold 0.00002 \
+ --first_step_threshold 0.00002 \
+ --tx_timestamp_timeout 100 \
+ --uds_address="${uds_address}" \
+ ${extra_args} \
+ > "${!log}" 2>&1 &
+ declare -g "${pid}=$!"
+
+ echo "ptp4l for interface ${if_name} logs to ${!log} and has pid ${!pid}"
+
+ sleep 1
+}
+
+ptp4l_stop()
+{
+ local if_name=$1
+ local log="ptp4l_log_${if_name}"
+ local pid="ptp4l_pid_${if_name}"
+
+ { kill ${!pid} && wait ${!pid}; } 2> /dev/null
+ rm "${!log}" 2> /dev/null
+}
+
+cpufreq_max()
+{
+ local cpu=$1
+ local freq="cpu${cpu}_freq"
+ local governor="cpu${cpu}_governor"
+
+ # Kernel may be compiled with CONFIG_CPU_FREQ disabled
+ if ! [ -d /sys/bus/cpu/devices/cpu${cpu}/cpufreq ]; then
+ return
+ fi
+
+ # declare dynamic variables cpu${cpu}_freq and cpu${cpu}_governor as
+ # global, so they can be referenced later
+ declare -g "${freq}=$(cat /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_min_freq)"
+ declare -g "${governor}=$(cat /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_governor)"
+
+ cat /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_max_freq > \
+ /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_min_freq
+ echo -n "performance" > \
+ /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_governor
+}
+
+cpufreq_restore()
+{
+ local cpu=$1
+ local freq="cpu${cpu}_freq"
+ local governor="cpu${cpu}_governor"
+
+ if ! [ -d /sys/bus/cpu/devices/cpu${cpu}/cpufreq ]; then
+ return
+ fi
+
+ echo "${!freq}" > /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_min_freq
+ echo -n "${!governor}" > \
+ /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_governor
+}
+
+isochron_recv_start()
+{
+ local if_name=$1
+ local uds=$2
+ local extra_args=$3
+
+ if ! [ -z "${uds}" ]; then
+ extra_args="--unix-domain-socket ${uds}"
+ fi
+
+ isochron rcv \
+ --interface ${if_name} \
+ --sched-priority 98 \
+ --sched-fifo \
+ --utc-tai-offset ${UTC_TAI_OFFSET} \
+ --quiet \
+ ${extra_args} & \
+ isochron_pid=$!
+
+ sleep 1
+}
+
+isochron_recv_stop()
+{
+ { kill ${isochron_pid} && wait ${isochron_pid}; } 2> /dev/null
+}
+
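+# Run one isochron sender/receiver pair and save the collected per-packet
+# timestamps to ${isochron_dat}. Positional arguments (empty strings are
+# allowed for the optional ones):
+#   sender_if_name receiver_if_name sender_uds receiver_uds base_time
+#   cycle_time shift_time num_pkts vid priority dst_ip isochron_dat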
+isochron_do()
+{
+ local sender_if_name=$1; shift
+ local receiver_if_name=$1; shift
+ local sender_uds=$1; shift
+ local receiver_uds=$1; shift
+ local base_time=$1; shift
+ local cycle_time=$1; shift
+ local shift_time=$1; shift
+ local num_pkts=$1; shift
+ local vid=$1; shift
+ local priority=$1; shift
+ local dst_ip=$1; shift
+ local isochron_dat=$1; shift
+ local extra_args=""
+ local receiver_extra_args=""
+ local vrf="$(master_name_get ${sender_if_name})"
+ local use_l2="true"
+
+ if ! [ -z "${dst_ip}" ]; then
+ use_l2="false"
+ fi
+
+ if ! [ -z "${vrf}" ]; then
+ dst_ip="${dst_ip}%${vrf}"
+ fi
+
+ if ! [ -z "${vid}" ]; then
+ vid="--vid=${vid}"
+ fi
+
+ if [ -z "${receiver_uds}" ]; then
+ extra_args="${extra_args} --omit-remote-sync"
+ fi
+
+ if ! [ -z "${shift_time}" ]; then
+ extra_args="${extra_args} --shift-time=${shift_time}"
+ fi
+
+ if [ "${use_l2}" = "true" ]; then
+ extra_args="${extra_args} --l2 --etype=0xdead ${vid}"
+ receiver_extra_args="--l2 --etype=0xdead"
+ else
+ extra_args="${extra_args} --l4 --ip-destination=${dst_ip}"
+ receiver_extra_args="--l4"
+ fi
+
+ cpufreq_max ${ISOCHRON_CPU}
+
+ isochron_recv_start "${h2}" "${receiver_uds}" "${receiver_extra_args}"
+
+ isochron send \
+ --interface ${sender_if_name} \
+ --unix-domain-socket ${sender_uds} \
+ --priority ${priority} \
+ --base-time ${base_time} \
+ --cycle-time ${cycle_time} \
+ --num-frames ${num_pkts} \
+ --frame-size 64 \
+ --txtime \
+ --utc-tai-offset ${UTC_TAI_OFFSET} \
+ --cpu-mask $((1 << ${ISOCHRON_CPU})) \
+ --sched-fifo \
+ --sched-priority 98 \
+ --client 127.0.0.1 \
+ --sync-threshold 5000 \
+ --output-file ${isochron_dat} \
+ ${extra_args} \
+ --quiet
+
+ isochron_recv_stop
+
+ cpufreq_restore ${ISOCHRON_CPU}
+}
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index d36b7da5082a..38021a0dd527 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -12,6 +12,9 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NF_TABLES_INET=y
CONFIG_NFT_TPROXY=m
CONFIG_NFT_SOCKET=m
@@ -19,3 +22,8 @@ CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_SCH_INGRESS=m
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index ff821025d309..9dd43d7d957b 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -71,6 +71,43 @@ chk_msk_remote_key_nr()
__chk_nr "grep -c remote_key" $*
}
+__chk_listen()
+{
+ local filter="$1"
+ local expected=$2
+
+ shift 2
+ msg=$*
+
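+	# ss -M dumps only MPTCP sockets, -l only listening ones and -N runs
+	# the command in the given network namespace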
+ nr=$(ss -N $ns -Ml "$filter" | grep -c LISTEN)
+ printf "%-50s" "$msg"
+
+ if [ $nr != $expected ]; then
+ echo "[ fail ] expected $expected found $nr"
+ ret=$test_cnt
+ else
+ echo "[ ok ]"
+ fi
+}
+
+chk_msk_listen()
+{
+ lport=$1
+ local msg="check for listen socket"
+
+	# a destination port search should always return an empty list
+ __chk_listen "dport $lport" 0 "listen match for dport $lport"
+
+ # should return 'our' mptcp listen socket
+ __chk_listen "sport $lport" 1 "listen match for sport $lport"
+
+ __chk_listen "src inet:0.0.0.0:$lport" 1 "listen match for saddr and sport"
+
+ __chk_listen "" 1 "all listen sockets"
+
+ nr=$(ss -Ml $filter | wc -l)
+}
+
# $1: ns, $2: port
wait_local_port_listen()
{
@@ -113,6 +150,7 @@ echo "a" | \
0.0.0.0 >/dev/null &
wait_local_port_listen $ns 10000
chk_msk_nr 0 "no msk on netns creation"
+chk_msk_listen 10000
echo "b" | \
timeout ${timeout_test} \
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 7314257d248a..d1de1e7702fb 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -70,6 +70,7 @@ init_partial()
ip netns add $netns || exit $ksft_skip
ip -net $netns link set lo up
ip netns exec $netns sysctl -q net.mptcp.enabled=1
+ ip netns exec $netns sysctl -q net.mptcp.pm_type=0
ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
if [ $checksum -eq 1 ]; then
@@ -266,6 +267,58 @@ reset_with_allow_join_id0()
ip netns exec $ns2 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns2_enable
}
+# Modify TCP payload without corrupting the TCP packet
+#
+# This rule inverts an 8-bit word at byte offset 148 of the 2nd TCP ACK
+# packet carrying enough data.
+# Once that is done, the TCP checksum field is updated so the packet is
+# still considered valid at the TCP level.
+# Because the MPTCP checksum, which covers the TCP options and data, has
+# not been updated, the modification will be detected and an MP_FAIL will
+# be emitted: that is what we want to validate here, without corrupting
+# "random" MPTCP options.
+#
+# To avoid having tc produce this pr_info() message for each TCP ACK
+# packet not carrying enough data:
+#
+# tc action pedit offset 162 out of bounds
+#
+# Netfilter is used to mark packets with enough data.
+reset_with_fail()
+{
+ reset "${1}" || return 1
+
+ ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=1
+ ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=1
+
+ check_invert=1
+ validate_checksum=1
+ local i="$2"
+ local ip="${3:-4}"
+ local tables
+
+ tables="iptables"
+ if [ $ip -eq 6 ]; then
+ tables="ip6tables"
+ fi
+
+ ip netns exec $ns2 $tables \
+ -t mangle \
+ -A OUTPUT \
+ -o ns2eth$i \
+ -p tcp \
+ -m length --length 150:9999 \
+ -m statistic --mode nth --packet 1 --every 99999 \
+ -j MARK --set-mark 42 || exit 1
+
+ tc -n $ns2 qdisc add dev ns2eth$i clsact || exit 1
+ tc -n $ns2 filter add dev ns2eth$i egress \
+ protocol ip prio 1000 \
+ handle 42 fw \
+ action pedit munge offset 148 u8 invert \
+ pipe csum tcp \
+ index 100 || exit 1
+}
+
fail_test()
{
ret=1
@@ -961,6 +1014,7 @@ chk_csum_nr()
local csum_ns2=${2:-0}
local count
local dump_stats
+ local extra_msg=""
local allow_multi_errors_ns1=0
local allow_multi_errors_ns2=0
@@ -976,6 +1030,9 @@ chk_csum_nr()
printf "%-${nr_blank}s %s" " " "sum"
count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
[ -z "$count" ] && count=0
+ if [ "$count" != "$csum_ns1" ]; then
+ extra_msg="$extra_msg ns1=$count"
+ fi
if { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
{ [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
echo "[fail] got $count data checksum error[s] expected $csum_ns1"
@@ -987,28 +1044,58 @@ chk_csum_nr()
echo -n " - csum "
count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
[ -z "$count" ] && count=0
+ if [ "$count" != "$csum_ns2" ]; then
+ extra_msg="$extra_msg ns2=$count"
+ fi
if { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
{ [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
echo "[fail] got $count data checksum error[s] expected $csum_ns2"
fail_test
dump_stats=1
else
- echo "[ ok ]"
+ echo -n "[ ok ]"
fi
[ "${dump_stats}" = 1 ] && dump_stats
+
+ echo "$extra_msg"
}
chk_fail_nr()
{
local fail_tx=$1
local fail_rx=$2
+ local ns_invert=${3:-""}
local count
local dump_stats
+ local ns_tx=$ns1
+ local ns_rx=$ns2
+ local extra_msg=""
+ local allow_tx_lost=0
+ local allow_rx_lost=0
+
+ if [[ $ns_invert = "invert" ]]; then
+ ns_tx=$ns2
+ ns_rx=$ns1
+ extra_msg=" invert"
+ fi
+
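+	# a leading "-" means the expected MP_FAIL counter is only an upper
+	# bound: some packets may be lost on the way, so a lower count is not
+	# an error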
+ if [[ "${fail_tx}" = "-"* ]]; then
+ allow_tx_lost=1
+ fail_tx=${fail_tx:1}
+ fi
+ if [[ "${fail_rx}" = "-"* ]]; then
+ allow_rx_lost=1
+ fail_rx=${fail_rx:1}
+ fi
printf "%-${nr_blank}s %s" " " "ftx"
- count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPFailTx | awk '{print $2}')
+ count=$(ip netns exec $ns_tx nstat -as | grep MPTcpExtMPFailTx | awk '{print $2}')
[ -z "$count" ] && count=0
if [ "$count" != "$fail_tx" ]; then
+ extra_msg="$extra_msg,tx=$count"
+ fi
+ if { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
+ { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
echo "[fail] got $count MP_FAIL[s] TX expected $fail_tx"
fail_test
dump_stats=1
@@ -1017,17 +1104,23 @@ chk_fail_nr()
fi
echo -n " - failrx"
- count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtMPFailRx | awk '{print $2}')
+ count=$(ip netns exec $ns_rx nstat -as | grep MPTcpExtMPFailRx | awk '{print $2}')
[ -z "$count" ] && count=0
if [ "$count" != "$fail_rx" ]; then
+ extra_msg="$extra_msg,rx=$count"
+ fi
+ if { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
+ { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
echo "[fail] got $count MP_FAIL[s] RX expected $fail_rx"
fail_test
dump_stats=1
else
- echo "[ ok ]"
+ echo -n "[ ok ]"
fi
[ "${dump_stats}" = 1 ] && dump_stats
+
+ echo "$extra_msg"
}
chk_fclose_nr()
@@ -1106,6 +1199,38 @@ chk_rst_nr()
echo "$extra_msg"
}
+chk_infi_nr()
+{
+ local infi_tx=$1
+ local infi_rx=$2
+ local count
+ local dump_stats
+
+ printf "%-${nr_blank}s %s" " " "itx"
+ count=$(ip netns exec $ns2 nstat -as | grep InfiniteMapTx | awk '{print $2}')
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$infi_tx" ]; then
+ echo "[fail] got $count infinite map[s] TX expected $infi_tx"
+ fail_test
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - infirx"
+ count=$(ip netns exec $ns1 nstat -as | grep InfiniteMapRx | awk '{print $2}')
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$infi_rx" ]; then
+ echo "[fail] got $count infinite map[s] RX expected $infi_rx"
+ fail_test
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+
+ [ "${dump_stats}" = 1 ] && dump_stats
+}
+
chk_join_nr()
{
local syn_nr=$1
@@ -1115,7 +1240,8 @@ chk_join_nr()
local csum_ns2=${5:-0}
local fail_nr=${6:-0}
local rst_nr=${7:-0}
- local corrupted_pkts=${8:-0}
+ local infi_nr=${8:-0}
+ local corrupted_pkts=${9:-0}
local count
local dump_stats
local with_cookie
@@ -1166,10 +1292,11 @@ chk_join_nr()
echo "[ ok ]"
fi
[ "${dump_stats}" = 1 ] && dump_stats
- if [ $checksum -eq 1 ]; then
+ if [ $validate_checksum -eq 1 ]; then
chk_csum_nr $csum_ns1 $csum_ns2
chk_fail_nr $fail_nr $fail_nr
chk_rst_nr $rst_nr $rst_nr
+ chk_infi_nr $infi_nr $infi_nr
fi
}
@@ -1485,6 +1612,13 @@ wait_attempt_fail()
return 1
}
+set_userspace_pm()
+{
+ local ns=$1
+
+ ip netns exec $ns sysctl -q net.mptcp.pm_type=1
+}
+
subflows_tests()
{
if reset "no JOIN"; then
@@ -2556,6 +2690,90 @@ fastclose_tests()
fi
}
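+# Return how many packets hit the tc pedit rule installed by reset_with_fail()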
+pedit_action_pkts()
+{
+ tc -n $ns2 -j -s action show action pedit index 100 | \
+ sed 's/.*"packets":\([0-9]\+\),.*/\1/'
+}
+
+fail_tests()
+{
+ # single subflow
+ if reset_with_fail "Infinite map" 1; then
+ run_tests $ns1 $ns2 10.0.1.1 128
+ chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
+ chk_fail_nr 1 -1 invert
+ fi
+}
+
+userspace_tests()
+{
+ # userspace pm type prevents add_addr
+ if reset "userspace pm type prevents add_addr"; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 0 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+ chk_add_nr 0 0
+ fi
+
+ # userspace pm type does not echo add_addr without daemon
+ if reset "userspace pm no echo w/o daemon"; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 0 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+ chk_add_nr 1 0
+ fi
+
+ # userspace pm type rejects join
+ if reset "userspace pm type rejects join"; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 0
+ fi
+
+ # userspace pm type does not send join
+ if reset "userspace pm type does not send join"; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+ fi
+
+ # userspace pm type prevents mp_prio
+ if reset "userspace pm type prevents mp_prio"; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ chk_join_nr 1 1 0
+ chk_prio_nr 0 0
+ fi
+
+ # userspace pm type prevents rm_addr
+ if reset "userspace pm type prevents rm_addr"; then
+ set_userspace_pm $ns1
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 0 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow
+ chk_join_nr 0 0 0
+ chk_rm_nr 0 0
+ fi
+}
+
implicit_tests()
{
# userspace pm type prevents add_addr
@@ -2624,6 +2842,8 @@ all_tests_sorted=(
d@deny_join_id0_tests
m@fullmesh_tests
z@fastclose_tests
+ F@fail_tests
+ u@userspace_tests
I@implicit_tests
)
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index a75a68ad652e..6a2f4b981e1d 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -6,6 +6,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <limits.h>
#include <sys/socket.h>
#include <sys/types.h>
@@ -21,17 +22,29 @@
#ifndef MPTCP_PM_NAME
#define MPTCP_PM_NAME "mptcp_pm"
#endif
+#ifndef MPTCP_PM_EVENTS
+#define MPTCP_PM_EVENTS "mptcp_pm_events"
+#endif
+#ifndef IPPROTO_MPTCP
+#define IPPROTO_MPTCP 262
+#endif
static void syntax(char *argv[])
{
fprintf(stderr, "%s add|get|set|del|flush|dump|accept [<args>]\n", argv[0]);
fprintf(stderr, "\tadd [flags signal|subflow|backup|fullmesh] [id <nr>] [dev <name>] <ip>\n");
+ fprintf(stderr, "\tann <local-ip> id <local-id> token <token> [port <local-port>] [dev <name>]\n");
+ fprintf(stderr, "\trem id <local-id> token <token>\n");
+ fprintf(stderr, "\tcsf lip <local-ip> lid <local-id> rip <remote-ip> rport <remote-port> token <token>\n");
+ fprintf(stderr, "\tdsf lip <local-ip> lport <local-port> rip <remote-ip> rport <remote-port> token <token>\n");
fprintf(stderr, "\tdel <id> [<ip>]\n");
fprintf(stderr, "\tget <id>\n");
fprintf(stderr, "\tset [<ip>] [id <nr>] flags [no]backup|[no]fullmesh [port <nr>]\n");
fprintf(stderr, "\tflush\n");
fprintf(stderr, "\tdump\n");
fprintf(stderr, "\tlimits [<rcv addr max> <subflow max>]\n");
+ fprintf(stderr, "\tevents\n");
+ fprintf(stderr, "\tlisten <local-ip> <local-port>\n");
exit(0);
}
@@ -83,6 +96,108 @@ static void nl_error(struct nlmsghdr *nh)
}
}
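+/* Join the given netlink multicast group (the MPTCP PM events group) and
+ * print every received event, with its attributes, to stderr until the
+ * process is killed.
+ */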
+static int capture_events(int fd, int event_group)
+{
+ u_int8_t buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) + 1024];
+ struct genlmsghdr *ghdr;
+ struct rtattr *attrs;
+ struct nlmsghdr *nh;
+ int ret = 0;
+ int res_len;
+ int msg_len;
+ fd_set rfds;
+
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+ &event_group, sizeof(event_group)) < 0)
+ error(1, errno, "could not join the " MPTCP_PM_EVENTS " mcast group");
+
+ do {
+ FD_ZERO(&rfds);
+ FD_SET(fd, &rfds);
+ res_len = NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) + 1024;
+
+ ret = select(FD_SETSIZE, &rfds, NULL, NULL, NULL);
+
+ if (ret < 0)
+ error(1, ret, "error in select() on NL socket");
+
+ res_len = recv(fd, buffer, res_len, 0);
+ if (res_len < 0)
+ error(1, res_len, "error on recv() from NL socket");
+
+ nh = (struct nlmsghdr *)buffer;
+
+ for (; NLMSG_OK(nh, res_len); nh = NLMSG_NEXT(nh, res_len)) {
+ if (nh->nlmsg_type == NLMSG_ERROR)
+ error(1, NLMSG_ERROR, "received invalid NL message");
+
+ ghdr = (struct genlmsghdr *)NLMSG_DATA(nh);
+
+ if (ghdr->cmd == 0)
+ continue;
+
+ fprintf(stderr, "type:%d", ghdr->cmd);
+
+ msg_len = nh->nlmsg_len - NLMSG_LENGTH(GENL_HDRLEN);
+
+ attrs = (struct rtattr *) ((char *) ghdr + GENL_HDRLEN);
+ while (RTA_OK(attrs, msg_len)) {
+ if (attrs->rta_type == MPTCP_ATTR_TOKEN)
+ fprintf(stderr, ",token:%u", *(__u32 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_FAMILY)
+ fprintf(stderr, ",family:%u", *(__u16 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_LOC_ID)
+ fprintf(stderr, ",loc_id:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_REM_ID)
+ fprintf(stderr, ",rem_id:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_SADDR4) {
+ u_int32_t saddr4 = ntohl(*(__u32 *)RTA_DATA(attrs));
+
+ fprintf(stderr, ",saddr4:%u.%u.%u.%u", saddr4 >> 24,
+ (saddr4 >> 16) & 0xFF, (saddr4 >> 8) & 0xFF,
+ (saddr4 & 0xFF));
+ } else if (attrs->rta_type == MPTCP_ATTR_SADDR6) {
+ char buf[INET6_ADDRSTRLEN];
+
+ if (inet_ntop(AF_INET6, RTA_DATA(attrs), buf,
+ sizeof(buf)) != NULL)
+ fprintf(stderr, ",saddr6:%s", buf);
+ } else if (attrs->rta_type == MPTCP_ATTR_DADDR4) {
+ u_int32_t daddr4 = ntohl(*(__u32 *)RTA_DATA(attrs));
+
+ fprintf(stderr, ",daddr4:%u.%u.%u.%u", daddr4 >> 24,
+ (daddr4 >> 16) & 0xFF, (daddr4 >> 8) & 0xFF,
+ (daddr4 & 0xFF));
+ } else if (attrs->rta_type == MPTCP_ATTR_DADDR6) {
+ char buf[INET6_ADDRSTRLEN];
+
+ if (inet_ntop(AF_INET6, RTA_DATA(attrs), buf,
+ sizeof(buf)) != NULL)
+ fprintf(stderr, ",daddr6:%s", buf);
+ } else if (attrs->rta_type == MPTCP_ATTR_SPORT)
+ fprintf(stderr, ",sport:%u",
+ ntohs(*(__u16 *)RTA_DATA(attrs)));
+ else if (attrs->rta_type == MPTCP_ATTR_DPORT)
+ fprintf(stderr, ",dport:%u",
+ ntohs(*(__u16 *)RTA_DATA(attrs)));
+ else if (attrs->rta_type == MPTCP_ATTR_BACKUP)
+ fprintf(stderr, ",backup:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_ERROR)
+ fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
+ fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
+
+ attrs = RTA_NEXT(attrs, msg_len);
+ }
+ }
+ fprintf(stderr, "\n");
+ } while (1);
+
+ return 0;
+}
+
/* do a netlink command and, if max > 0, fetch the reply */
static int do_nl_req(int fd, struct nlmsghdr *nh, int len, int max)
{
@@ -116,11 +231,18 @@ static int do_nl_req(int fd, struct nlmsghdr *nh, int len, int max)
return ret;
}
-static int genl_parse_getfamily(struct nlmsghdr *nlh)
+static int genl_parse_getfamily(struct nlmsghdr *nlh, int *pm_family,
+ int *events_mcast_grp)
{
struct genlmsghdr *ghdr = NLMSG_DATA(nlh);
int len = nlh->nlmsg_len;
struct rtattr *attrs;
+ struct rtattr *grps;
+ struct rtattr *grp;
+ int got_events_grp;
+ int got_family;
+ int grps_len;
+ int grp_len;
if (nlh->nlmsg_type != GENL_ID_CTRL)
error(1, errno, "Not a controller message, len=%d type=0x%x\n",
@@ -135,9 +257,42 @@ static int genl_parse_getfamily(struct nlmsghdr *nlh)
error(1, errno, "Unknown controller command %d\n", ghdr->cmd);
attrs = (struct rtattr *) ((char *) ghdr + GENL_HDRLEN);
+ got_family = 0;
+ got_events_grp = 0;
+
while (RTA_OK(attrs, len)) {
- if (attrs->rta_type == CTRL_ATTR_FAMILY_ID)
- return *(__u16 *)RTA_DATA(attrs);
+ if (attrs->rta_type == CTRL_ATTR_FAMILY_ID) {
+ *pm_family = *(__u16 *)RTA_DATA(attrs);
+ got_family = 1;
+ } else if (attrs->rta_type == CTRL_ATTR_MCAST_GROUPS) {
+ grps = RTA_DATA(attrs);
+ grps_len = RTA_PAYLOAD(attrs);
+
+ while (RTA_OK(grps, grps_len)) {
+ grp = RTA_DATA(grps);
+ grp_len = RTA_PAYLOAD(grps);
+ got_events_grp = 0;
+
+ while (RTA_OK(grp, grp_len)) {
+ if (grp->rta_type == CTRL_ATTR_MCAST_GRP_ID)
+ *events_mcast_grp = *(__u32 *)RTA_DATA(grp);
+ else if (grp->rta_type == CTRL_ATTR_MCAST_GRP_NAME &&
+ !strcmp(RTA_DATA(grp), MPTCP_PM_EVENTS))
+ got_events_grp = 1;
+
+ grp = RTA_NEXT(grp, grp_len);
+ }
+
+ if (got_events_grp)
+ break;
+
+ grps = RTA_NEXT(grps, grps_len);
+ }
+ }
+
+ if (got_family && got_events_grp)
+ return 0;
+
attrs = RTA_NEXT(attrs, len);
}
@@ -145,7 +300,7 @@ static int genl_parse_getfamily(struct nlmsghdr *nlh)
return -1;
}
-static int resolve_mptcp_pm_netlink(int fd)
+static int resolve_mptcp_pm_netlink(int fd, int *pm_family, int *events_mcast_grp)
{
char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
@@ -167,7 +322,421 @@ static int resolve_mptcp_pm_netlink(int fd)
off += NLMSG_ALIGN(rta->rta_len);
do_nl_req(fd, nh, off, sizeof(data));
- return genl_parse_getfamily((void *)data);
+ return genl_parse_getfamily((void *)data, pm_family, events_mcast_grp);
+}
+
+int dsf(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct rtattr *rta, *addr;
+ u_int16_t family, port;
+ struct nlmsghdr *nh;
+ u_int32_t token;
+ int addr_start;
+ int off = 0;
+ int arg;
+
+ const char *params[5];
+
+ memset(params, 0, 5 * sizeof(const char *));
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_SUBFLOW_DESTROY,
+ MPTCP_PM_VER);
+
+ if (argc < 12)
+ syntax(argv);
+
+ /* Params recorded in this order:
+ * <local-ip>, <local-port>, <remote-ip>, <remote-port>, <token>
+ */
+ for (arg = 2; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "lip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local IP");
+
+ params[0] = argv[arg];
+ } else if (!strcmp(argv[arg], "lport")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local port");
+
+ params[1] = argv[arg];
+ } else if (!strcmp(argv[arg], "rip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote IP");
+
+ params[2] = argv[arg];
+ } else if (!strcmp(argv[arg], "rport")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote port");
+
+ params[3] = argv[arg];
+ } else if (!strcmp(argv[arg], "token")) {
+ if (++arg >= argc)
+ error(1, 0, " missing token");
+
+ params[4] = argv[arg];
+ } else
+ error(1, 0, "unknown keyword %s", argv[arg]);
+ }
+
+ for (arg = 0; arg < 4; arg = arg + 2) {
+ /* addr header */
+ addr_start = off;
+ addr = (void *)(data + off);
+ addr->rta_type = NLA_F_NESTED |
+ ((arg == 0) ? MPTCP_PM_ATTR_ADDR : MPTCP_PM_ATTR_ADDR_REMOTE);
+ addr->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(addr->rta_len);
+
+ /* addr data */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, params[arg], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, params[arg], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else
+ error(1, errno, "can't parse ip %s", params[arg]);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* port */
+ port = atoi(params[arg + 1]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &port, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ addr->rta_len = off - addr_start;
+ }
+
+ /* token */
+ token = atoi(params[4]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ do_nl_req(fd, nh, off, 0);
+
+ return 0;
+}
+
+int csf(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ const char *params[5];
+ struct nlmsghdr *nh;
+ struct rtattr *addr;
+ struct rtattr *rta;
+ u_int16_t family;
+ u_int32_t token;
+ u_int16_t port;
+ int addr_start;
+ u_int8_t id;
+ int off = 0;
+ int arg;
+
+ memset(params, 0, 5 * sizeof(const char *));
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_SUBFLOW_CREATE,
+ MPTCP_PM_VER);
+
+ if (argc < 12)
+ syntax(argv);
+
+ /* Params recorded in this order:
+ * <local-ip>, <local-id>, <remote-ip>, <remote-port>, <token>
+ */
+ for (arg = 2; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "lip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local IP");
+
+ params[0] = argv[arg];
+ } else if (!strcmp(argv[arg], "lid")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local id");
+
+ params[1] = argv[arg];
+ } else if (!strcmp(argv[arg], "rip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote ip");
+
+ params[2] = argv[arg];
+ } else if (!strcmp(argv[arg], "rport")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote port");
+
+ params[3] = argv[arg];
+ } else if (!strcmp(argv[arg], "token")) {
+ if (++arg >= argc)
+ error(1, 0, " missing token");
+
+ params[4] = argv[arg];
+ } else
+ error(1, 0, "unknown param %s", argv[arg]);
+ }
+
+ for (arg = 0; arg < 4; arg = arg + 2) {
+ /* addr header */
+ addr_start = off;
+ addr = (void *)(data + off);
+ addr->rta_type = NLA_F_NESTED |
+ ((arg == 0) ? MPTCP_PM_ATTR_ADDR : MPTCP_PM_ATTR_ADDR_REMOTE);
+ addr->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(addr->rta_len);
+
+ /* addr data */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, params[arg], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, params[arg], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else
+ error(1, errno, "can't parse ip %s", params[arg]);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ if (arg == 2) {
+ /* port */
+ port = atoi(params[arg + 1]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &port, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
+ if (arg == 0) {
+ /* id */
+ id = atoi(params[arg + 1]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
+ addr->rta_len = off - addr_start;
+ }
+
+ /* token */
+ token = atoi(params[4]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ do_nl_req(fd, nh, off, 0);
+
+ return 0;
+}
+
+int remove_addr(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct nlmsghdr *nh;
+ struct rtattr *rta;
+ u_int32_t token;
+ u_int8_t id;
+ int off = 0;
+ int arg;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_REMOVE,
+ MPTCP_PM_VER);
+
+ if (argc < 6)
+ syntax(argv);
+
+ for (arg = 2; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "id")) {
+ if (++arg >= argc)
+ error(1, 0, " missing id value");
+
+ id = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_LOC_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "token")) {
+ if (++arg >= argc)
+ error(1, 0, " missing token value");
+
+ token = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else
+ error(1, 0, "unknown keyword %s", argv[arg]);
+ }
+
+ do_nl_req(fd, nh, off, 0);
+ return 0;
+}
+
+int announce_addr(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ u_int32_t flags = MPTCP_PM_ADDR_FLAG_SIGNAL;
+ u_int32_t token = UINT_MAX;
+ struct rtattr *rta, *addr;
+ u_int32_t id = UINT_MAX;
+ struct nlmsghdr *nh;
+ u_int16_t family;
+ int addr_start;
+ int off = 0;
+ int arg;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_ANNOUNCE,
+ MPTCP_PM_VER);
+
+ if (argc < 7)
+ syntax(argv);
+
+ /* local-ip header */
+ addr_start = off;
+ addr = (void *)(data + off);
+ addr->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
+ addr->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(addr->rta_len);
+
+ /* local-ip data */
+ /* record addr type */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, argv[2], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, argv[2], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else
+ error(1, errno, "can't parse ip %s", argv[2]);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* addr family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ for (arg = 3; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "id")) {
+ /* local-id */
+ if (++arg >= argc)
+ error(1, 0, " missing id value");
+
+ id = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "dev")) {
+ /* for the if_index */
+ int32_t ifindex;
+
+ if (++arg >= argc)
+ error(1, 0, " missing dev name");
+
+ ifindex = if_nametoindex(argv[arg]);
+ if (!ifindex)
+ error(1, errno, "unknown device %s", argv[arg]);
+
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_IF_IDX;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &ifindex, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "port")) {
+ /* local-port (optional) */
+ u_int16_t port;
+
+ if (++arg >= argc)
+ error(1, 0, " missing port value");
+
+ port = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &port, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "token")) {
+ /* MPTCP connection token */
+ if (++arg >= argc)
+ error(1, 0, " missing token value");
+
+ token = atoi(argv[arg]);
+ } else
+ error(1, 0, "unknown keyword %s", argv[arg]);
+ }
+
+ /* addr flags */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &flags, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ addr->rta_len = off - addr_start;
+
+ if (id == UINT_MAX || token == UINT_MAX)
+ error(1, 0, " missing mandatory inputs");
+
+ /* token */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ do_nl_req(fd, nh, off, 0);
+
+ return 0;
}
int add_addr(int fd, int pm_family, int argc, char *argv[])
@@ -654,6 +1223,54 @@ int get_set_limits(int fd, int pm_family, int argc, char *argv[])
return 0;
}
+int add_listener(int argc, char *argv[])
+{
+ struct sockaddr_storage addr;
+ struct sockaddr_in6 *a6;
+ struct sockaddr_in *a4;
+ u_int16_t family;
+ int enable = 1;
+ int sock;
+ int err;
+
+ if (argc < 4)
+ syntax(argv);
+
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+ a4 = (struct sockaddr_in *)&addr;
+ a6 = (struct sockaddr_in6 *)&addr;
+
+ if (inet_pton(AF_INET, argv[2], &a4->sin_addr)) {
+ family = AF_INET;
+ a4->sin_family = family;
+ a4->sin_port = htons(atoi(argv[3]));
+ } else if (inet_pton(AF_INET6, argv[2], &a6->sin6_addr)) {
+ family = AF_INET6;
+ a6->sin6_family = family;
+ a6->sin6_port = htons(atoi(argv[3]));
+ } else
+ error(1, errno, "can't parse ip %s", argv[2]);
+
+ sock = socket(family, SOCK_STREAM, IPPROTO_MPTCP);
+ if (sock < 0)
+ error(1, errno, "can't create listener sock\n");
+
+ if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable))) {
+ close(sock);
+ error(1, errno, "can't set SO_REUSEADDR on listener sock\n");
+ }
+
+ err = bind(sock, (struct sockaddr *)&addr,
+ ((family == AF_INET) ? sizeof(struct sockaddr_in) :
+ sizeof(struct sockaddr_in6)));
+
+ if (err == 0 && listen(sock, 30) == 0)
+ pause();
+
+ close(sock);
+ return 0;
+}
+
int set_flags(int fd, int pm_family, int argc, char *argv[])
{
char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
@@ -773,7 +1390,9 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
int main(int argc, char *argv[])
{
- int fd, pm_family;
+ int events_mcast_grp;
+ int pm_family;
+ int fd;
if (argc < 2)
syntax(argv);
@@ -782,10 +1401,18 @@ int main(int argc, char *argv[])
if (fd == -1)
error(1, errno, "socket netlink");
- pm_family = resolve_mptcp_pm_netlink(fd);
+ resolve_mptcp_pm_netlink(fd, &pm_family, &events_mcast_grp);
if (!strcmp(argv[1], "add"))
return add_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "ann"))
+ return announce_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "rem"))
+ return remove_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "csf"))
+ return csf(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "dsf"))
+ return dsf(fd, pm_family, argc, argv);
else if (!strcmp(argv[1], "del"))
return del_addr(fd, pm_family, argc, argv);
else if (!strcmp(argv[1], "flush"))
@@ -798,6 +1425,10 @@ int main(int argc, char *argv[])
return get_set_limits(fd, pm_family, argc, argv);
else if (!strcmp(argv[1], "set"))
return set_flags(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "events"))
+ return capture_events(fd, events_mcast_grp);
+ else if (!strcmp(argv[1], "listen"))
+ return add_listener(argc, argv);
fprintf(stderr, "unknown sub-command: %s", argv[1]);
syntax(argv);
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
new file mode 100755
index 000000000000..78d0bb640b11
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -0,0 +1,779 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+	echo "SKIP: Cannot run test without ip tool"
+ exit 1
+fi
+
+ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
+REMOVED=7 # MPTCP_EVENT_REMOVED
+SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
+SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
+
+AF_INET=2
+AF_INET6=10
+
+evts_pid=0
+client4_pid=0
+server4_pid=0
+client6_pid=0
+server6_pid=0
+client4_token=""
+server4_token=""
+client6_token=""
+server6_token=""
+client4_port=0
+client6_port=0
+app4_port=50002
+new4_port=50003
+app6_port=50004
+client_addr_id=${RANDOM:0:2}
+server_addr_id=${RANDOM:0:2}
+
+sec=$(date +%s)
+rndh=$(stdbuf -o0 -e0 printf %x "$sec")-$(mktemp -u XXXXXX)
+ns1="ns1-$rndh"
+ns2="ns2-$rndh"
+
+cleanup()
+{
+ echo "cleanup"
+
+ rm -rf $file
+
+ # Terminate the MPTCP connection and related processes
+ if [ $client4_pid -ne 0 ]; then
+ kill -SIGUSR1 $client4_pid > /dev/null 2>&1
+ fi
+ if [ $server4_pid -ne 0 ]; then
+ kill $server4_pid > /dev/null 2>&1
+ fi
+ if [ $client6_pid -ne 0 ]; then
+ kill -SIGUSR1 $client6_pid > /dev/null 2>&1
+ fi
+ if [ $server6_pid -ne 0 ]; then
+ kill $server6_pid > /dev/null 2>&1
+ fi
+ if [ $evts_pid -ne 0 ]; then
+ kill $evts_pid > /dev/null 2>&1
+ fi
+ local netns
+ for netns in "$ns1" "$ns2" ;do
+ ip netns del "$netns"
+ done
+}
+
+trap cleanup EXIT
+
+# Create and configure network namespaces for testing
+for i in "$ns1" "$ns2" ;do
+ ip netns add "$i" || exit 1
+ ip -net "$i" link set lo up
+ ip netns exec "$i" sysctl -q net.mptcp.enabled=1
+ ip netns exec "$i" sysctl -q net.mptcp.pm_type=1
+done
+
+# "$ns1" ns2
+# ns1eth2 ns2eth1
+
+ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
+
+# Add IPv4/v6 addresses to the namespaces
+ip -net "$ns1" addr add 10.0.1.1/24 dev ns1eth2
+ip -net "$ns1" addr add 10.0.2.1/24 dev ns1eth2
+ip -net "$ns1" addr add dead:beef:1::1/64 dev ns1eth2 nodad
+ip -net "$ns1" addr add dead:beef:2::1/64 dev ns1eth2 nodad
+ip -net "$ns1" link set ns1eth2 up
+
+ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
+ip -net "$ns2" addr add 10.0.2.2/24 dev ns2eth1
+ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
+ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth1 nodad
+ip -net "$ns2" link set ns2eth1 up
+
+stdbuf -o0 -e0 printf "Created network namespaces ns1, ns2 \t\t\t[OK]\n"
+
+make_file()
+{
+ # Store a chunk of data in a file to transmit over an MPTCP connection
+ local name=$1
+ local ksize=1
+
+ dd if=/dev/urandom of="$name" bs=2 count=$ksize 2> /dev/null
+ echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
+}
+
+make_connection()
+{
+ local file
+ file=$(mktemp)
+ make_file "$file" "client"
+
+ local is_v6=$1
+ local app_port=$app4_port
+ local connect_addr="10.0.1.1"
+ local listen_addr="0.0.0.0"
+ if [ "$is_v6" = "v6" ]
+ then
+ connect_addr="dead:beef:1::1"
+ listen_addr="::"
+ app_port=$app6_port
+ else
+ is_v6="v4"
+ fi
+
+ # Capture netlink events over the two network namespaces running
+ # the MPTCP client and server
+ local client_evts
+ client_evts=$(mktemp)
+ :>"$client_evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
+ local client_evts_pid=$!
+ local server_evts
+ server_evts=$(mktemp)
+ :>"$server_evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
+ local server_evts_pid=$!
+ sleep 0.5
+
+ # Run the server
+ ip netns exec "$ns1" \
+ ./mptcp_connect -s MPTCP -w 300 -p $app_port -l $listen_addr > /dev/null 2>&1 &
+ local server_pid=$!
+ sleep 0.5
+
+ # Run the client, transfer $file and stay connected to the server
+ # to conduct tests
+ ip netns exec "$ns2" \
+ ./mptcp_connect -s MPTCP -w 300 -m sendfile -p $app_port $connect_addr\
+ 2>&1 > /dev/null < "$file" &
+ local client_pid=$!
+ sleep 1
+
+ # Capture client/server attributes from MPTCP connection netlink events
+ kill $client_evts_pid
+
+ local client_token
+ local client_port
+ local client_serverside
+ local server_token
+ local server_serverside
+
+ client_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ client_port=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ client_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
+ "$client_evts")
+ kill $server_evts_pid
+ server_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ server_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
+ "$server_evts")
+ rm -f "$client_evts" "$server_evts" "$file"
+
+ if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
+ [ "$server_serverside" = 1 ]
+ then
+ stdbuf -o0 -e0 printf "Established IP%s MPTCP Connection ns2 => ns1 \t\t[OK]\n" $is_v6
+ else
+ exit 1
+ fi
+
+ if [ "$is_v6" = "v6" ]
+ then
+ client6_token=$client_token
+ server6_token=$server_token
+ client6_port=$client_port
+ client6_pid=$client_pid
+ server6_pid=$server_pid
+ else
+ client4_token=$client_token
+ server4_token=$server_token
+ client4_port=$client_port
+ client4_pid=$client_pid
+ server4_pid=$server_pid
+ fi
+}
+
+verify_announce_event()
+{
+ local evt=$1
+ local e_type=$2
+ local e_token=$3
+ local e_addr=$4
+ local e_id=$5
+ local e_dport=$6
+ local e_af=$7
+ local type
+ local token
+ local addr
+ local dport
+ local id
+
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$e_af" = "v6" ]
+ then
+ addr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ else
+ addr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ fi
+ dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
+ [ "$addr" = "$e_addr" ] && [ "$dport" = "$e_dport" ] &&
+ [ "$id" = "$e_id" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+}
+
+test_announce()
+{
+ local evts
+ evts=$(mktemp)
+ # Capture events on the network namespace running the server
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # ADD_ADDR using an invalid token should result in no action
+ local invalid_token=$(( client4_token - 1))
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token $invalid_token id\
+ $client_addr_id dev ns2eth1 > /dev/null 2>&1
+
+ local type
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ stdbuf -o0 -e0 printf "ADD_ADDR 10.0.2.2 (ns2) => ns1, invalid token \t\t"
+ if [ "$type" = "" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ else
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+ fi
+
+ # ADD_ADDR from the client to server machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns2"\
+ ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
+ ns2eth1 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.2 (ns2) => ns1, reuse port \t\t" $client_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$server4_token" "10.0.2.2" "$client_addr_id"\
+ "$client4_port"
+
+ # ADD_ADDR6 from the client to server machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl ann\
+ dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR6 id:%d dead:beef:2::2 (ns2) => ns1, reuse port\t\t" $client_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
+ "$client_addr_id" "$client6_port" "v6"
+
+ # ADD_ADDR from the client to server machine using a new port
+ :>"$evts"
+ client_addr_id=$((client_addr_id+1))
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ $client_addr_id dev ns2eth1 port $new4_port > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.2 (ns2) => ns1, new port \t\t\t" $client_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
+ "$client_addr_id" "$new4_port"
+
+ kill $evts_pid
+
+ # Capture events on the network namespace running the client
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # ADD_ADDR from the server to client machine reusing the subflow port
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id dev ns1eth2 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.1 (ns1) => ns2, reuse port \t\t" $server_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ "$server_addr_id" "$app4_port"
+
+ # ADD_ADDR6 from the server to client machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
+ $server_addr_id dev ns1eth2 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR6 id:%d dead:beef:2::1 (ns1) => ns2, reuse port\t\t" $server_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
+ "$server_addr_id" "$app6_port" "v6"
+
+ # ADD_ADDR from the server to client machine using a new port
+ :>"$evts"
+ server_addr_id=$((server_addr_id+1))
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id dev ns1eth2 port $new4_port > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.1 (ns1) => ns2, new port \t\t\t" $server_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ "$server_addr_id" "$new4_port"
+
+ kill $evts_pid
+ rm -f "$evts"
+}
+
+verify_remove_event()
+{
+ local evt=$1
+ local e_type=$2
+ local e_token=$3
+ local e_id=$4
+ local type
+ local token
+ local id
+
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
+ [ "$id" = "$e_id" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+}
+
+test_remove()
+{
+ local evts
+ evts=$(mktemp)
+
+ # Capture events on the network namespace running the server
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # RM_ADDR using an invalid token should result in no action
+ local invalid_token=$(( client4_token - 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1, invalid token \t"\
+ $client_addr_id
+ local type
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ if [ "$type" = "" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ else
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ fi
+
+ # RM_ADDR using an invalid addr id should result in no action
+ local invalid_id=$(( client_addr_id + 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $invalid_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1, invalid id \t"\
+ $invalid_id
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ if [ "$type" = "" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ else
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ fi
+
+ # RM_ADDR from the client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1 \t"\
+ $client_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$server4_token" "$client_addr_id"
+
+ # RM_ADDR from the client to server machine
+ :>"$evts"
+ client_addr_id=$(( client_addr_id - 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1 \t"\
+ $client_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$server4_token" "$client_addr_id"
+
+ # RM_ADDR6 from the client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR6 id:%d ns2 => ns1 \t"\
+ $client_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$server6_token" "$client_addr_id"
+
+ kill $evts_pid
+
+ # Capture events on the network namespace running the client
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # RM_ADDR from the server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ $server_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns1 => ns2 \t"\
+ $server_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$client4_token" "$server_addr_id"
+
+ # RM_ADDR from the server to client machine
+ :>"$evts"
+ server_addr_id=$(( server_addr_id - 1 ))
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ $server_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns1 => ns2 \t" $server_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$client4_token" "$server_addr_id"
+
+ # RM_ADDR6 from the server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
+ $server_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR6 id:%d ns1 => ns2 \t" $server_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$client6_token" "$server_addr_id"
+
+ kill $evts_pid
+ rm -f "$evts"
+}
+
+verify_subflow_events()
+{
+ local evt=$1
+ local e_type=$2
+ local e_token=$3
+ local e_family=$4
+ local e_saddr=$5
+ local e_daddr=$6
+ local e_dport=$7
+ local e_locid=$8
+ local e_remid=$9
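+	# after shifting by two, the caller's 10th and 11th arguments can be
+	# read back as $8 and $9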
+ shift 2
+ local e_from=$8
+ local e_to=$9
+ local type
+ local token
+ local family
+ local saddr
+ local daddr
+ local dport
+ local locid
+ local remid
+
+ if [ "$e_type" = "$SUB_ESTABLISHED" ]
+ then
+ if [ "$e_family" = "$AF_INET6" ]
+ then
+ stdbuf -o0 -e0 printf "CREATE_SUBFLOW6 %s (%s) => %s (%s) "\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ else
+ stdbuf -o0 -e0 printf "CREATE_SUBFLOW %s (%s) => %s (%s) \t"\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ fi
+ else
+ if [ "$e_family" = "$AF_INET6" ]
+ then
+ stdbuf -o0 -e0 printf "DESTROY_SUBFLOW6 %s (%s) => %s (%s) "\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ else
+ stdbuf -o0 -e0 printf "DESTROY_SUBFLOW %s (%s) => %s (%s) \t"\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ fi
+ fi
+
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ family=$(sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ locid=$(sed --unbuffered -n 's/.*\(loc_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ remid=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$family" = "$AF_INET6" ]
+ then
+ saddr=$(sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ daddr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ else
+ saddr=$(sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ daddr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ fi
+
+ if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
+ [ "$daddr" = "$e_daddr" ] && [ "$e_dport" = "$dport" ] &&
+ [ "$family" = "$e_family" ] && [ "$saddr" = "$e_saddr" ] &&
+ [ "$e_locid" = "$locid" ] && [ "$e_remid" = "$remid" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+}
+
+test_subflows()
+{
+ local evts
+ evts=$(mktemp)
+ # Capture events on the network namespace running the server
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.2:<subflow-port>
+ ip netns exec "$ns2" ./pm_nl_ctl listen 10.0.2.2\
+ "$client4_port" > /dev/null 2>&1 &
+ local listener_pid=$!
+
+ # ADD_ADDR from client to server machine reusing the subflow port
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ $client_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2\
+ rport "$client4_port" token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ "10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+ kill $listener_pid > /dev/null 2>&1
+
+ local sport
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl dsf lip 10.0.2.1 lport "$sport" rip 10.0.2.2 rport\
+ "$client4_port" token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ "10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
+
+ # RM_ADDR from client to server machine
+ ip netns exec "$ns2" ./pm_nl_ctl rem id $client_addr_id token\
+ "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at dead:beef:2::2:<subflow-port>
+ ip netns exec "$ns2" ./pm_nl_ctl listen dead:beef:2::2\
+ "$client6_port" > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR6 from client to server machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl ann dead:beef:2::2 token "$client6_token" id\
+ $client_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW6 from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl csf lip dead:beef:2::1 lid 23 rip\
+ dead:beef:2::2 rport "$client6_port" token "$server6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server6_token" "$AF_INET6"\
+ "dead:beef:2::1" "dead:beef:2::2" "$client6_port" "23"\
+ "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+ kill $listener_pid > /dev/null 2>&1
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW6 from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl dsf lip dead:beef:2::1 lport "$sport" rip\
+ dead:beef:2::2 rport "$client6_port" token "$server6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$server6_token" "$AF_INET6"\
+ "dead:beef:2::1" "dead:beef:2::2" "$client6_port" "23"\
+ "$client_addr_id" "ns1" "ns2"
+
+ # RM_ADDR from client to server machine
+ ip netns exec "$ns2" ./pm_nl_ctl rem id $client_addr_id token\
+ "$client6_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.2:<new-port>
+ ip netns exec "$ns2" ./pm_nl_ctl listen 10.0.2.2\
+ $new4_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR from client to server machine using a new port
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ $client_addr_id port $new4_port > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2 rport\
+ $new4_port token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server4_token" "$AF_INET"\
+ "10.0.2.1" "10.0.2.2" "$new4_port" "23"\
+ "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+ kill $listener_pid > /dev/null 2>&1
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl dsf lip 10.0.2.1 lport "$sport" rip 10.0.2.2 rport\
+ $new4_port token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ "10.0.2.2" "$new4_port" "23" "$client_addr_id" "ns1" "ns2"
+
+ # RM_ADDR from client to server machine
+ ip netns exec "$ns2" ./pm_nl_ctl rem id $client_addr_id token\
+ "$client4_token" > /dev/null 2>&1
+
+ kill $evts_pid
+
+ # Capture events on the network namespace running the client
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.1:<subflow-port>
+ ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
+ $app4_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR from server to client machine reusing the subflow port
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
+ $app4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ "10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+	kill $listener_pid > /dev/null 2>&1
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
+ $app4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ "10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # RM_ADDR from server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+ "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at dead:beef:2::1:<subflow-port>
+ ip netns exec "$ns1" ./pm_nl_ctl listen dead:beef:2::1\
+ $app6_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR6 from server to client machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
+ $server_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW6 from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl csf lip dead:beef:2::2 lid 23 rip\
+ dead:beef:2::1 rport $app6_port token "$client6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client6_token"\
+ "$AF_INET6" "dead:beef:2::2"\
+ "dead:beef:2::1" "$app6_port" "23"\
+ "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+ kill $listener_pid > /dev/null 2>&1
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW6 from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl dsf lip dead:beef:2::2 lport "$sport" rip\
+ dead:beef:2::1 rport $app6_port token "$client6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$client6_token" "$AF_INET6" "dead:beef:2::2"\
+ "dead:beef:2::1" "$app6_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # RM_ADDR6 from server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+ "$server6_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.1:<new-port>
+ ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
+ $new4_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR from server to client machine using a new port
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id port $new4_port > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
+ $new4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client4_token" "$AF_INET"\
+ "10.0.2.2" "10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+ kill $listener_pid > /dev/null 2>&1
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
+ $new4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ "10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # RM_ADDR from server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+ "$server4_token" > /dev/null 2>&1
+
+ kill $evts_pid
+ rm -f "$evts"
+}
+
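+# Establish one IPv4 and one IPv6 MPTCP connection, then run the
+# announce, remove and subflow test cases against them.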
+make_connection
+make_connection "v6"
+test_announce
+test_remove
+test_subflows
+
+exit 0
diff --git a/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
new file mode 100755
index 000000000000..f508657ee126
--- /dev/null
+++ b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
@@ -0,0 +1,255 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for the accept_unsolicited_na feature, which enables the
+# RFC 9131 behaviour. The following is the test matrix.
+# drop accept fwding behaviour
+# ---- ------ ------ ----------------------------------------------
+# 1 X X Drop NA packet and don't pass up the stack
+# 0 0 X Pass NA packet up the stack, don't update NC
+# 0 1 0 Pass NA packet up the stack, don't update NC
+# 0 1 1 Pass NA packet up the stack, and add a STALE
+# NC entry
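+#
+# "drop" is the drop_unsolicited_na sysctl, "accept" is accept_unsolicited_na
+# and "fwding" is forwarding, all set on the router's interface; "NC" is the
+# IPv6 neighbour cache.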
+
+ret=0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+PAUSE_ON_FAIL=no
+PAUSE=no
+
+HOST_NS="ns-host"
+ROUTER_NS="ns-router"
+
+HOST_INTF="veth-host"
+ROUTER_INTF="veth-router"
+
+ROUTER_ADDR="2000:20::1"
+HOST_ADDR="2000:20::2"
+SUBNET_WIDTH=64
+ROUTER_ADDR_WITH_MASK="${ROUTER_ADDR}/${SUBNET_WIDTH}"
+HOST_ADDR_WITH_MASK="${HOST_ADDR}/${SUBNET_WIDTH}"
+
+IP_HOST="ip -6 -netns ${HOST_NS}"
+IP_HOST_EXEC="ip netns exec ${HOST_NS}"
+IP_ROUTER="ip -6 -netns ${ROUTER_NS}"
+IP_ROUTER_EXEC="ip netns exec ${ROUTER_NS}"
+
+tcpdump_stdout=
+tcpdump_stderr=
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ printf " TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf " TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+
+ if [ "${PAUSE}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+}
+
+setup()
+{
+ set -e
+
+ local drop_unsolicited_na=$1
+ local accept_unsolicited_na=$2
+ local forwarding=$3
+
+	# Set up two namespaces and a veth tunnel across them.
+	# One end of the tunnel is a router and the other end is a host.
+ ip netns add ${HOST_NS}
+ ip netns add ${ROUTER_NS}
+ ${IP_ROUTER} link add ${ROUTER_INTF} type veth \
+ peer name ${HOST_INTF} netns ${HOST_NS}
+
+ # Enable IPv6 on both router and host, and configure static addresses.
+	# The router here is the DUT.
+	# Set up the router configuration as specified by the arguments.
+	# The forwarding=0 case checks that a non-router does not add
+	# neighbour entries.
+ ROUTER_CONF=net.ipv6.conf.${ROUTER_INTF}
+ ${IP_ROUTER_EXEC} sysctl -qw \
+ ${ROUTER_CONF}.forwarding=${forwarding}
+ ${IP_ROUTER_EXEC} sysctl -qw \
+ ${ROUTER_CONF}.drop_unsolicited_na=${drop_unsolicited_na}
+ ${IP_ROUTER_EXEC} sysctl -qw \
+ ${ROUTER_CONF}.accept_unsolicited_na=${accept_unsolicited_na}
+ ${IP_ROUTER_EXEC} sysctl -qw ${ROUTER_CONF}.disable_ipv6=0
+ ${IP_ROUTER} addr add ${ROUTER_ADDR_WITH_MASK} dev ${ROUTER_INTF}
+
+ # Turn on ndisc_notify on host interface so that
+ # the host sends unsolicited NAs.
+ HOST_CONF=net.ipv6.conf.${HOST_INTF}
+ ${IP_HOST_EXEC} sysctl -qw ${HOST_CONF}.ndisc_notify=1
+ ${IP_HOST_EXEC} sysctl -qw ${HOST_CONF}.disable_ipv6=0
+ ${IP_HOST} addr add ${HOST_ADDR_WITH_MASK} dev ${HOST_INTF}
+
+ set +e
+}
+
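+# Wait (up to 15 seconds) for a single unsolicited NA (ICMPv6 type 136)
+# from the host to appear on the router's interface before the neighbour
+# cache is inspected.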
+start_tcpdump() {
+ set -e
+ tcpdump_stdout=`mktemp`
+ tcpdump_stderr=`mktemp`
+ ${IP_ROUTER_EXEC} timeout 15s \
+ tcpdump --immediate-mode -tpni ${ROUTER_INTF} -c 1 \
+ "icmp6 && icmp6[0] == 136 && src ${HOST_ADDR}" \
+ > ${tcpdump_stdout} 2> /dev/null
+ set +e
+}
+
+cleanup_tcpdump()
+{
+ set -e
+ [[ ! -z ${tcpdump_stdout} ]] && rm -f ${tcpdump_stdout}
+ [[ ! -z ${tcpdump_stderr} ]] && rm -f ${tcpdump_stderr}
+ tcpdump_stdout=
+ tcpdump_stderr=
+ set +e
+}
+
+cleanup()
+{
+ cleanup_tcpdump
+ ip netns del ${HOST_NS}
+ ip netns del ${ROUTER_NS}
+}
+
+link_up() {
+ set -e
+ ${IP_ROUTER} link set dev ${ROUTER_INTF} up
+ ${IP_HOST} link set dev ${HOST_INTF} up
+ set +e
+}
+
+verify_ndisc() {
+ local drop_unsolicited_na=$1
+ local accept_unsolicited_na=$2
+ local forwarding=$3
+
+ neigh_show_output=$(${IP_ROUTER} neigh show \
+ to ${HOST_ADDR} dev ${ROUTER_INTF} nud stale)
+ if [ ${drop_unsolicited_na} -eq 0 ] && \
+ [ ${accept_unsolicited_na} -eq 1 ] && \
+ [ ${forwarding} -eq 1 ]; then
+ # Neighbour entry expected to be present for 011 case
+ [[ ${neigh_show_output} ]]
+ else
+ # Neighbour entry expected to be absent for all other cases
+ [[ -z ${neigh_show_output} ]]
+ fi
+}
+
+test_unsolicited_na_common()
+{
+ # Setup the test bed, but keep links down
+ setup $1 $2 $3
+
+	# Bring the links up and wait for the unsolicited NA; capturing it
+	# with tcpdump gives neighbour processing a chance to complete.
+ link_up
+ start_tcpdump
+
+ # Verify the neighbour table
+ verify_ndisc $1 $2 $3
+
+}
+
+test_unsolicited_na_combination() {
+	test_unsolicited_na_common $1 $2 $3
+	# Save the result now: a plain assignment would reset $?.
+	local rc=$?
+	test_msg=("test_unsolicited_na: "
+		"drop_unsolicited_na=$1 "
+		"accept_unsolicited_na=$2 "
+		"forwarding=$3")
+	log_test $rc 0 "${test_msg[*]}"
+ cleanup
+}
+
+test_unsolicited_na_combinations() {
+ # Args: drop_unsolicited_na accept_unsolicited_na forwarding
+
+ # Expect entry
+ test_unsolicited_na_combination 0 1 1
+
+ # Expect no entry
+ test_unsolicited_na_combination 0 0 0
+ test_unsolicited_na_combination 0 0 1
+ test_unsolicited_na_combination 0 1 0
+ test_unsolicited_na_combination 1 0 0
+ test_unsolicited_na_combination 1 0 1
+ test_unsolicited_na_combination 1 1 0
+ test_unsolicited_na_combination 1 1 1
+}
+
+###############################################################################
+# usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+ -p Pause on fail
+ -P Pause after each test before cleanup
+EOF
+}
+
+###############################################################################
+# main
+
+while getopts :pPh o
+do
+ case $o in
+ p) PAUSE_ON_FAIL=yes;;
+ P) PAUSE=yes;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+# make sure we don't pause twice
+[ "${PAUSE}" = "yes" ] && PAUSE_ON_FAIL=no
+
+if [ "$(id -u)" -ne 0 ]; then
+	echo "SKIP: Need root privileges"
+	exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v tcpdump)" ]; then
+ echo "SKIP: Could not run test without tcpdump tool"
+ exit $ksft_skip
+fi
+
+# start clean
+cleanup &> /dev/null
+
+test_unsolicited_na_combinations
+
+printf "\nTests passed: %3d\n" ${nsuccess}
+printf "Tests failed: %3d\n" ${nfail}
+
+exit $ret
diff --git a/tools/testing/selftests/net/vrf_strict_mode_test.sh b/tools/testing/selftests/net/vrf_strict_mode_test.sh
index 865d53c1781c..417d214264f3 100755
--- a/tools/testing/selftests/net/vrf_strict_mode_test.sh
+++ b/tools/testing/selftests/net/vrf_strict_mode_test.sh
@@ -14,6 +14,8 @@ INIT_NETNS_NAME="init"
PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+TESTS="init testns mix"
+
log_test()
{
local rc=$1
@@ -262,6 +264,8 @@ cleanup()
vrf_strict_mode_tests_init()
{
+ log_section "VRF strict_mode test on init network namespace"
+
vrf_strict_mode_check_support init
strict_mode_check_default init
@@ -292,6 +296,8 @@ vrf_strict_mode_tests_init()
vrf_strict_mode_tests_testns()
{
+ log_section "VRF strict_mode test on testns network namespace"
+
vrf_strict_mode_check_support testns
strict_mode_check_default testns
@@ -318,6 +324,8 @@ vrf_strict_mode_tests_testns()
vrf_strict_mode_tests_mix()
{
+ log_section "VRF strict_mode test mixing init and testns network namespaces"
+
read_strict_mode_compare_and_check init 1
read_strict_mode_compare_and_check testns 0
@@ -341,18 +349,30 @@ vrf_strict_mode_tests_mix()
read_strict_mode_compare_and_check testns 0
}
-vrf_strict_mode_tests()
-{
- log_section "VRF strict_mode test on init network namespace"
- vrf_strict_mode_tests_init
+################################################################################
+# usage
- log_section "VRF strict_mode test on testns network namespace"
- vrf_strict_mode_tests_testns
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
- log_section "VRF strict_mode test mixing init and testns network namespaces"
- vrf_strict_mode_tests_mix
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+EOF
}
+################################################################################
+# main
+
+while getopts ":t:h" opt; do
+ case $opt in
+ t) TESTS=$OPTARG;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
vrf_strict_mode_check_support()
{
local nsname=$1
@@ -391,7 +411,17 @@ fi
cleanup &> /dev/null
setup
-vrf_strict_mode_tests
+for t in $TESTS
+do
+ case $t in
+ vrf_strict_mode_tests_init|init) vrf_strict_mode_tests_init;;
+ vrf_strict_mode_tests_testns|testns) vrf_strict_mode_tests_testns;;
+ vrf_strict_mode_tests_mix|mix) vrf_strict_mode_tests_mix;;
+
+ help) echo "Test names: $TESTS"; exit 0;;
+
+ esac
+done
cleanup
print_log_test_results
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
index 695a1958723f..fd76b69635a4 100755
--- a/tools/testing/selftests/netfilter/nft_fib.sh
+++ b/tools/testing/selftests/netfilter/nft_fib.sh
@@ -66,6 +66,20 @@ table inet filter {
EOF
}
+load_pbr_ruleset() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain forward {
+ type filter hook forward priority raw;
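+		# Accept only if a fib lookup of (saddr, iif) resolves to an
+		# output interface, i.e. the oif result is non-zero.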
+ fib saddr . iif oif gt 0 accept
+ log drop
+ }
+}
+EOF
+}
+
load_ruleset_count() {
local netns=$1
@@ -219,4 +233,40 @@ sleep 2
ip netns exec ${ns1} ping -c 3 -q 1c3::c01d > /dev/null
check_fib_counter 3 ${nsrouter} 1c3::c01d || exit 1
+# flush all nft rulesets
+ip netns exec ${ns1} nft flush ruleset
+ip netns exec ${ns2} nft flush ruleset
+ip netns exec ${nsrouter} nft flush ruleset
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+
+ip -net ${ns1} addr del 10.0.2.99/24 dev eth0
+ip -net ${ns1} addr del dead:2::99/64 dev eth0
+
+ip -net ${nsrouter} addr del dead:2::1/64 dev veth0
+
+# Load the pbr ruleset on the router and check the fib iif+oif lookup.
+load_pbr_ruleset ${nsrouter}
+if [ $? -ne 0 ] ; then
+ echo "SKIP: Could not load fib forward ruleset"
+ exit $ksft_skip
+fi
+
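+# Policy routing: table 128 (consulted for all traffic) routes 10.0.1.0/24
+# via veth0, table 129 (consulted only for traffic arriving on veth0)
+# routes 10.0.2.0/24 via veth1, so the fib lookup must honour the
+# input interface.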
+ip -net ${nsrouter} rule add from all table 128
+ip -net ${nsrouter} rule add from all iif veth0 table 129
+ip -net ${nsrouter} route add table 128 to 10.0.1.0/24 dev veth0
+ip -net ${nsrouter} route add table 129 to 10.0.2.0/24 dev veth1
+
+# drop main ipv4 table
+ip -net ${nsrouter} -4 rule delete table main
+
+test_ping 10.0.2.99 dead:2::99
+if [ $? -ne 0 ] ; then
+ ip -net ${nsrouter} nft list ruleset
+ echo "FAIL: fib mismatch in pbr setup"
+ exit 1
+fi
+
+echo "PASS: fib expression forward check with policy based routing"
exit 0
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index 19515dcb7d04..f50778a3d744 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -40,6 +40,7 @@ ALL_TESTS="$ALL_TESTS 0004:1:1:uint_0001"
ALL_TESTS="$ALL_TESTS 0005:3:1:int_0003"
ALL_TESTS="$ALL_TESTS 0006:50:1:bitmap_0001"
ALL_TESTS="$ALL_TESTS 0007:1:1:boot_int"
+ALL_TESTS="$ALL_TESTS 0008:1:1:match_int"
function allow_user_defaults()
{
@@ -785,6 +786,27 @@ sysctl_test_0007()
return $ksft_skip
}
+sysctl_test_0008()
+{
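+	# The kernel-side test exposes match_int, which is expected to read
+	# back as 1 when the sysctl macro values match.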
+ TARGET="${SYSCTL}/match_int"
+ if [ ! -f $TARGET ]; then
+ echo "Skipping test for $TARGET as it is not present ..."
+ return $ksft_skip
+ fi
+
+ echo -n "Testing if $TARGET is matched in kernel"
+ ORIG_VALUE=$(cat "${TARGET}")
+
+ if [ $ORIG_VALUE -ne 1 ]; then
+ echo "TEST FAILED"
+ rc=1
+ test_rc
+ fi
+
+ echo "ok"
+ return 0
+}
+
list_tests()
{
echo "Test ID list:"
@@ -800,6 +822,7 @@ list_tests()
echo "0005 x $(get_test_count 0005) - tests proc_douintvec() array"
echo "0006 x $(get_test_count 0006) - tests proc_do_large_bitmap()"
echo "0007 x $(get_test_count 0007) - tests setting sysctl from kernel boot param"
+ echo "0008 x $(get_test_count 0008) - tests sysctl macro values match"
}
usage()